[{"data":1,"prerenderedAt":1916},["ShallowReactive",2],{"/en-us/blog/tags/user-stories/":3,"navigation-en-us":20,"banner-en-us":450,"footer-en-us":467,"user stories-tag-page-en-us":677},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":11,"_id":13,"_type":14,"title":15,"_source":16,"_file":17,"_stem":18,"_extension":19},"/en-us/blog/tags/user-stories","tags",false,"",{"tag":9,"tagSlug":10},"user stories","user-stories",{"template":12},"BlogTag","content:en-us:blog:tags:user-stories.yml","yaml","User Stories","content","en-us/blog/tags/user-stories.yml","en-us/blog/tags/user-stories","yml",{"_path":21,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":446,"_type":14,"title":447,"_source":16,"_file":448,"_stem":449,"_extension":19},"/shared/en-us/main-navigation","en-us",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":377,"minimal":408,"duo":427,"pricingDeployment":436},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,187,192,298,358],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":60,"config":61},"Meet GitLab 
Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo ai",{"title":65,"description":66,"link":67},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":169},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,124,148],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[108,112,116,120],{"text":109,"config":110},"CI/CD",{"href":111,"dataGaLocation":28,"dataGaName":109},"/solutions/continuous-integration/",{"text":113,"config":114},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":115},"AI assisted development",{"text":117,"config":118},"Source Code Management",{"href":119,"dataGaLocation":28,"dataGaName":117},"/solutions/source-code-management/",{"text":121,"config":122},"Automated Software Delivery",{"href":105,"dataGaLocation":28,"dataGaName":123},"Automated software delivery",{"title":125,"description":126,"link":127,"items":132},"Security","Deliver code faster without compromising 
security",{"config":128},{"href":129,"dataGaName":130,"dataGaLocation":28,"icon":131},"/solutions/security-compliance/","security and compliance","ShieldCheckLight",[133,138,143],{"text":134,"config":135},"Application Security Testing",{"href":136,"dataGaName":137,"dataGaLocation":28},"/solutions/application-security-testing/","Application security testing",{"text":139,"config":140},"Software Supply Chain Security",{"href":141,"dataGaLocation":28,"dataGaName":142},"/solutions/supply-chain/","Software supply chain security",{"text":144,"config":145},"Software Compliance",{"href":146,"dataGaName":147,"dataGaLocation":28},"/solutions/software-compliance/","software compliance",{"title":149,"link":150,"items":155},"Measurement",{"config":151},{"icon":152,"href":153,"dataGaName":154,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[156,160,164],{"text":157,"config":158},"Visibility & Measurement",{"href":153,"dataGaLocation":28,"dataGaName":159},"Visibility and Measurement",{"text":161,"config":162},"Value Stream Management",{"href":163,"dataGaLocation":28,"dataGaName":161},"/solutions/value-stream-management/",{"text":165,"config":166},"Analytics & Insights",{"href":167,"dataGaLocation":28,"dataGaName":168},"/solutions/analytics-and-insights/","Analytics and insights",{"title":170,"items":171},"GitLab for",[172,177,182],{"text":173,"config":174},"Enterprise",{"href":175,"dataGaLocation":28,"dataGaName":176},"/enterprise/","enterprise",{"text":178,"config":179},"Small Business",{"href":180,"dataGaLocation":28,"dataGaName":181},"/small-business/","small business",{"text":183,"config":184},"Public Sector",{"href":185,"dataGaLocation":28,"dataGaName":186},"/solutions/public-sector/","public 
sector",{"text":188,"config":189},"Pricing",{"href":190,"dataGaName":191,"dataGaLocation":28,"dataNavLevelOne":191},"/pricing/","pricing",{"text":193,"config":194,"link":196,"lists":200,"feature":285},"Resources",{"dataNavLevelOne":195},"resources",{"text":197,"config":198},"View all resources",{"href":199,"dataGaName":195,"dataGaLocation":28},"/resources/",[201,234,257],{"title":202,"items":203},"Getting started",[204,209,214,219,224,229],{"text":205,"config":206},"Install",{"href":207,"dataGaName":208,"dataGaLocation":28},"/install/","install",{"text":210,"config":211},"Quick start guides",{"href":212,"dataGaName":213,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":215,"config":216},"Learn",{"href":217,"dataGaLocation":28,"dataGaName":218},"https://university.gitlab.com/","learn",{"text":220,"config":221},"Product documentation",{"href":222,"dataGaName":223,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":225,"config":226},"Best practice videos",{"href":227,"dataGaName":228,"dataGaLocation":28},"/getting-started-videos/","best practice videos",{"text":230,"config":231},"Integrations",{"href":232,"dataGaName":233,"dataGaLocation":28},"/integrations/","integrations",{"title":235,"items":236},"Discover",[237,242,247,252],{"text":238,"config":239},"Customer success stories",{"href":240,"dataGaName":241,"dataGaLocation":28},"/customers/","customer success stories",{"text":243,"config":244},"Blog",{"href":245,"dataGaName":246,"dataGaLocation":28},"/blog/","blog",{"text":248,"config":249},"Remote",{"href":250,"dataGaName":251,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":253,"config":254},"TeamOps",{"href":255,"dataGaName":256,"dataGaLocation":28},"/teamops/","teamops",{"title":258,"items":259},"Connect",[260,265,270,275,280],{"text":261,"config":262},"GitLab 
Services",{"href":263,"dataGaName":264,"dataGaLocation":28},"/services/","services",{"text":266,"config":267},"Community",{"href":268,"dataGaName":269,"dataGaLocation":28},"/community/","community",{"text":271,"config":272},"Forum",{"href":273,"dataGaName":274,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":276,"config":277},"Events",{"href":278,"dataGaName":279,"dataGaLocation":28},"/events/","events",{"text":281,"config":282},"Partners",{"href":283,"dataGaName":284,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":286,"textColor":287,"text":288,"image":289,"link":293},"#2f2a6b","#fff","Insights for the future of software development",{"altText":290,"config":291},"the source promo card",{"src":292},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758208064/dzl0dbift9xdizyelkk4.svg",{"text":294,"config":295},"Read the latest",{"href":296,"dataGaName":297,"dataGaLocation":28},"/the-source/","the source",{"text":299,"config":300,"lists":302},"Company",{"dataNavLevelOne":301},"company",[303],{"items":304},[305,310,316,318,323,328,333,338,343,348,353],{"text":306,"config":307},"About",{"href":308,"dataGaName":309,"dataGaLocation":28},"/company/","about",{"text":311,"config":312,"footerGa":315},"Jobs",{"href":313,"dataGaName":314,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":314},{"text":276,"config":317},{"href":278,"dataGaName":279,"dataGaLocation":28},{"text":319,"config":320},"Leadership",{"href":321,"dataGaName":322,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":324,"config":325},"Team",{"href":326,"dataGaName":327,"dataGaLocation":28},"/company/team/","team",{"text":329,"config":330},"Handbook",{"href":331,"dataGaName":332,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":334,"config":335},"Investor relations",{"href":336,"dataGaName":337,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":339,"config":340},"Trust 
Center",{"href":341,"dataGaName":342,"dataGaLocation":28},"/security/","trust center",{"text":344,"config":345},"AI Transparency Center",{"href":346,"dataGaName":347,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":349,"config":350},"Newsletter",{"href":351,"dataGaName":352,"dataGaLocation":28},"/company/contact/","newsletter",{"text":354,"config":355},"Press",{"href":356,"dataGaName":357,"dataGaLocation":28},"/press/","press",{"text":359,"config":360,"lists":361},"Contact us",{"dataNavLevelOne":301},[362],{"items":363},[364,367,372],{"text":35,"config":365},{"href":37,"dataGaName":366,"dataGaLocation":28},"talk to sales",{"text":368,"config":369},"Get help",{"href":370,"dataGaName":371,"dataGaLocation":28},"/support/","get help",{"text":373,"config":374},"Customer portal",{"href":375,"dataGaName":376,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":378,"login":379,"suggestions":386},"Close",{"text":380,"link":381},"To search repositories and projects, login to",{"text":382,"config":383},"gitlab.com",{"href":42,"dataGaName":384,"dataGaLocation":385},"search login","search",{"text":387,"default":388},"Suggestions",[389,391,395,397,401,405],{"text":57,"config":390},{"href":62,"dataGaName":57,"dataGaLocation":385},{"text":392,"config":393},"Code Suggestions (AI)",{"href":394,"dataGaName":392,"dataGaLocation":385},"/solutions/code-suggestions/",{"text":109,"config":396},{"href":111,"dataGaName":109,"dataGaLocation":385},{"text":398,"config":399},"GitLab on AWS",{"href":400,"dataGaName":398,"dataGaLocation":385},"/partners/technology-partners/aws/",{"text":402,"config":403},"GitLab on Google Cloud",{"href":404,"dataGaName":402,"dataGaLocation":385},"/partners/technology-partners/google-cloud-platform/",{"text":406,"config":407},"Why 
GitLab?",{"href":70,"dataGaName":406,"dataGaLocation":385},{"freeTrial":409,"mobileIcon":414,"desktopIcon":419,"secondaryButton":422},{"text":410,"config":411},"Start free trial",{"href":412,"dataGaName":33,"dataGaLocation":413},"https://gitlab.com/-/trials/new/","nav",{"altText":415,"config":416},"Gitlab Icon",{"src":417,"dataGaName":418,"dataGaLocation":413},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203874/jypbw1jx72aexsoohd7x.svg","gitlab icon",{"altText":415,"config":420},{"src":421,"dataGaName":418,"dataGaLocation":413},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203875/gs4c8p8opsgvflgkswz9.svg",{"text":423,"config":424},"Get Started",{"href":425,"dataGaName":426,"dataGaLocation":413},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":428,"mobileIcon":432,"desktopIcon":434},{"text":429,"config":430},"Learn more about GitLab Duo",{"href":62,"dataGaName":431,"dataGaLocation":413},"gitlab duo",{"altText":415,"config":433},{"src":417,"dataGaName":418,"dataGaLocation":413},{"altText":415,"config":435},{"src":421,"dataGaName":418,"dataGaLocation":413},{"freeTrial":437,"mobileIcon":442,"desktopIcon":444},{"text":438,"config":439},"Back to pricing",{"href":190,"dataGaName":440,"dataGaLocation":413,"icon":441},"back to pricing","GoBack",{"altText":415,"config":443},{"src":417,"dataGaName":418,"dataGaLocation":413},{"altText":415,"config":445},{"src":421,"dataGaName":418,"dataGaLocation":413},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":451,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"title":452,"button":453,"image":458,"config":462,"_id":464,"_type":14,"_source":16,"_file":465,"_stem":466,"_extension":19},"/shared/en-us/banner","is now in public beta!",{"text":454,"config":455},"Try the 
Beta",{"href":456,"dataGaName":457,"dataGaLocation":28},"/gitlab-duo/agent-platform/","duo banner",{"altText":459,"config":460},"GitLab Duo Agent Platform",{"src":461},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":463},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":468,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":469,"_id":673,"_type":14,"title":674,"_source":16,"_file":675,"_stem":676,"_extension":19},"/shared/en-us/main-footer",{"text":470,"source":471,"edit":477,"contribute":482,"config":487,"items":492,"minimal":665},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":472,"config":473},"View page source",{"href":474,"dataGaName":475,"dataGaLocation":476},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":478,"config":479},"Edit this page",{"href":480,"dataGaName":481,"dataGaLocation":476},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":483,"config":484},"Please contribute",{"href":485,"dataGaName":486,"dataGaLocation":476},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":488,"facebook":489,"youtube":490,"linkedin":491},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[493,516,572,601,635],{"title":46,"links":494,"subMenu":499},[495],{"text":496,"config":497},"DevSecOps platform",{"href":55,"dataGaName":498,"dataGaLocation":476},"devsecops platform",[500],{"title":188,"links":501},[502,506,511],{"text":503,"config":504},"View plans",{"href":190,"dataGaName":505,"dataGaLocation":476},"view plans",{"text":507,"config":508},"Why 
Premium?",{"href":509,"dataGaName":510,"dataGaLocation":476},"/pricing/premium/","why premium",{"text":512,"config":513},"Why Ultimate?",{"href":514,"dataGaName":515,"dataGaLocation":476},"/pricing/ultimate/","why ultimate",{"title":517,"links":518},"Solutions",[519,524,526,528,533,538,542,545,549,554,556,559,562,567],{"text":520,"config":521},"Digital transformation",{"href":522,"dataGaName":523,"dataGaLocation":476},"/topics/digital-transformation/","digital transformation",{"text":134,"config":525},{"href":136,"dataGaName":134,"dataGaLocation":476},{"text":123,"config":527},{"href":105,"dataGaName":106,"dataGaLocation":476},{"text":529,"config":530},"Agile development",{"href":531,"dataGaName":532,"dataGaLocation":476},"/solutions/agile-delivery/","agile delivery",{"text":534,"config":535},"Cloud transformation",{"href":536,"dataGaName":537,"dataGaLocation":476},"/topics/cloud-native/","cloud transformation",{"text":539,"config":540},"SCM",{"href":119,"dataGaName":541,"dataGaLocation":476},"source code management",{"text":109,"config":543},{"href":111,"dataGaName":544,"dataGaLocation":476},"continuous integration & delivery",{"text":546,"config":547},"Value stream management",{"href":163,"dataGaName":548,"dataGaLocation":476},"value stream management",{"text":550,"config":551},"GitOps",{"href":552,"dataGaName":553,"dataGaLocation":476},"/solutions/gitops/","gitops",{"text":173,"config":555},{"href":175,"dataGaName":176,"dataGaLocation":476},{"text":557,"config":558},"Small business",{"href":180,"dataGaName":181,"dataGaLocation":476},{"text":560,"config":561},"Public sector",{"href":185,"dataGaName":186,"dataGaLocation":476},{"text":563,"config":564},"Education",{"href":565,"dataGaName":566,"dataGaLocation":476},"/solutions/education/","education",{"text":568,"config":569},"Financial services",{"href":570,"dataGaName":571,"dataGaLocation":476},"/solutions/finance/","financial 
services",{"title":193,"links":573},[574,576,578,580,583,585,587,589,591,593,595,597,599],{"text":205,"config":575},{"href":207,"dataGaName":208,"dataGaLocation":476},{"text":210,"config":577},{"href":212,"dataGaName":213,"dataGaLocation":476},{"text":215,"config":579},{"href":217,"dataGaName":218,"dataGaLocation":476},{"text":220,"config":581},{"href":222,"dataGaName":582,"dataGaLocation":476},"docs",{"text":243,"config":584},{"href":245,"dataGaName":246,"dataGaLocation":476},{"text":238,"config":586},{"href":240,"dataGaName":241,"dataGaLocation":476},{"text":248,"config":588},{"href":250,"dataGaName":251,"dataGaLocation":476},{"text":261,"config":590},{"href":263,"dataGaName":264,"dataGaLocation":476},{"text":253,"config":592},{"href":255,"dataGaName":256,"dataGaLocation":476},{"text":266,"config":594},{"href":268,"dataGaName":269,"dataGaLocation":476},{"text":271,"config":596},{"href":273,"dataGaName":274,"dataGaLocation":476},{"text":276,"config":598},{"href":278,"dataGaName":279,"dataGaLocation":476},{"text":281,"config":600},{"href":283,"dataGaName":284,"dataGaLocation":476},{"title":299,"links":602},[603,605,607,609,611,613,615,619,624,626,628,630],{"text":306,"config":604},{"href":308,"dataGaName":301,"dataGaLocation":476},{"text":311,"config":606},{"href":313,"dataGaName":314,"dataGaLocation":476},{"text":319,"config":608},{"href":321,"dataGaName":322,"dataGaLocation":476},{"text":324,"config":610},{"href":326,"dataGaName":327,"dataGaLocation":476},{"text":329,"config":612},{"href":331,"dataGaName":332,"dataGaLocation":476},{"text":334,"config":614},{"href":336,"dataGaName":337,"dataGaLocation":476},{"text":616,"config":617},"Sustainability",{"href":618,"dataGaName":616,"dataGaLocation":476},"/sustainability/",{"text":620,"config":621},"Diversity, inclusion and belonging (DIB)",{"href":622,"dataGaName":623,"dataGaLocation":476},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":339,"config":625},{"href":341,"dataGaName":342,"dataGaLocation":476},{"text":349,"config":627},{"href":351,"dataGaName":352,"dataGaLocation":476},{"text":354,"config":629},{"href":356,"dataGaName":357,"dataGaLocation":476},{"text":631,"config":632},"Modern Slavery Transparency Statement",{"href":633,"dataGaName":634,"dataGaLocation":476},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":636,"links":637},"Contact Us",[638,641,643,645,650,655,660],{"text":639,"config":640},"Contact an expert",{"href":37,"dataGaName":38,"dataGaLocation":476},{"text":368,"config":642},{"href":370,"dataGaName":371,"dataGaLocation":476},{"text":373,"config":644},{"href":375,"dataGaName":376,"dataGaLocation":476},{"text":646,"config":647},"Status",{"href":648,"dataGaName":649,"dataGaLocation":476},"https://status.gitlab.com/","status",{"text":651,"config":652},"Terms of use",{"href":653,"dataGaName":654,"dataGaLocation":476},"/terms/","terms of use",{"text":656,"config":657},"Privacy statement",{"href":658,"dataGaName":659,"dataGaLocation":476},"/privacy/","privacy statement",{"text":661,"config":662},"Cookie preferences",{"dataGaName":663,"dataGaLocation":476,"id":664,"isOneTrustButton":91},"cookie preferences","ot-sdk-btn",{"items":666},[667,669,671],{"text":651,"config":668},{"href":653,"dataGaName":654,"dataGaLocation":476},{"text":656,"config":670},{"href":658,"dataGaName":659,"dataGaLocation":476},{"text":661,"config":672},{"dataGaName":663,"dataGaLocation":476,"id":664,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":678,"featuredPost":1895,"totalPagesCount":1914,"initialPosts":1915},[679,705,728,750,770,791,815,840,863,883,904,925,946,965,985,1006,1029,1048,1069,1089,1109,1130,1151,1172,1191,1211,1232,1254,1274,1296,1316,1336,1357,1379,1398,1418,1438,1458,1478,1499,1521,1541,1561,1581,1600,1621,1641,1661,1681,1699,1718,1737,1757,1777,1796,1817,1836,1856,1876],{"_path":680,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":681,"content":689,"config":698,"_id":701,"_type":14,"title":702,"_source":16,"_file":703,"_stem":704,"_extension":19},"/en-us/blog/an-agile-approach-to-documentation-and-structure",{"title":682,"description":683,"ogTitle":682,"ogDescription":683,"noIndex":6,"ogImage":684,"ogUrl":685,"ogSiteName":686,"ogType":687,"canonicalUrls":685,"schema":688},"An Agile approach to documentation and structure","Combining flexibility and structure: why we decided to use GitLab.com for all UnscrewMe documentation and code to keep an overview, always find the relevant information quickly, and easily track progress.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670464/Blog/Hero%20Images/gitlab-loves-open-source.jpg","https://about.gitlab.com/blog/an-agile-approach-to-documentation-and-structure","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"An Agile approach to documentation and structure\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Goetz Buerkle\"}],\n        \"datePublished\": \"2017-12-13\",\n      }",{"title":682,"description":683,"authors":690,"heroImage":684,"date":692,"body":693,"category":694,"tags":695},[691],"Goetz Buerkle","2017-12-13","\n\nWith an idea and a name, I was ready to start working more seriously on\n[UnscrewMe](http://unscrewme.co.uk/), a simple wine tasting scheduler app. 
Well, almost ready – to avoid ending up with a\nmess of files and folders and stuff scattered across different devices, and\ncertainly never where I need them, my next objective was to set up a central\nlocation where I could store and organize everything flexibly.\n\n\u003C!-- more -->\n\n## GitLab – selecting simple tools\n\nI wanted to keep the overhead low and the management of the documents simple,\nyet extensible enough to cover everything I would need to get started, including\nsimple lists, longer notes, logo drafts, and also more structured technical\nconcepts and even invoices.\n\nBeing a [Certified Scrum Product Owner](https://www.scrumalliance.org/certifications/practitioners/cspo-certification) and using a [GitLab](/) instance at work, I decided to take advantage of the free private repositories and use GitLab.com for UnscrewMe. This combines the simplicity of “just” storing everything in files and folders, with the advantage of being able to use Markdown for more advanced formatting, including sub headings, nested lists and images. And all information can easily be accessed on any device, either via Git directly or the GitLab.com web interface, which also renders Markdown files nicely.\n\nIn addition, project management features of GitLab like [issues](https://docs.gitlab.com/ee/user/project/issues/), [milestones](https://docs.gitlab.com/ee/user/project/milestones/) and\n[Issue Boards](/stages-devops-lifecycle/issueboard/) would provide a useful, flexible and lightweight framework to\ntrack my progress. 
By defining project phases and grouping all open tasks in\nvarious ways, I could get a quick overview of what I would need to do next,\nbefore I could actually launch my Minimum Viable Product (MVP).\nUsing the full power of GitLab.com, I created a “[Group](https://docs.gitlab.com/ee/user/group/index.html)” and three separate\nrepositories: one for all the general documentation, one for the actual web\napplication, and a third for the pre-launch website.\n\n## Defining a flexible structure\n\nYou could of course call my folder structure flawed, as it is not always entirely\nclear where new content or document should go, but so far it works fine for me.\nI started with a high-level view and specified six broad areas:\n* ideas – for anything largely creative\n* concepts – for more detailed specifications and drafts\n* business – for business plans and similar documents focused on the business in general\n* roadmap – to define the main steps without immediately looking at all the details\n* design – basically, everything that is not text\n* finance – for invoices, contracts, etc.\n\nThese six folders give me enough structure and flexibility to get started,\nwithout having to think too hard about what should go where.\nA couple of years ago, I started prepending most files I create with dates,\nlike “2017–08–31\". I find that adding dates are a useful primary sorting\ncriteria when trying to get a quick overview, so I stuck with this approach for\nmy new project as well, even though it might not be the perfect match for all files.\n\n## Google Keep – enabling quick, low-barrier content generation\n\nWith a system mainly based on text files, I could use any editor. 
As I started\nusing [Google Keep](https://www.google.com/keep/) for personal notes a few\nmonths ago, I knew that it was flexible and reliable enough for my needs.\n\nI do have a subscription for a very stripped-down text editor, but I must admit,\nthat I don’t like the barely existing interface too much, and started using\nGoogle Keep for many tasks instead. The big benefit of Google Keep, above the\nother web services I used to rely on for writing, is the support of writing\nnotes offline. While these days you mostly have 4G, 3G or wifi anyway, even on\nholiday, I did find myself sometimes at events or in places without connectivity.\nAnd then, being able to write something offline, that would automatically be\nsynchronized as soon as I would be online again, proved rather useful.\n\nThe only obvious drawback for me now is, that Google Keep does not support\nMarkdown for structure and formatting. But as Markdown markup is pretty minimal\nand easy to read, this hasn’t been much of a limitation.\n\nThe notes editor is simple and fast – I do not really need anything more\nadvanced or complicated. What I do value though it the possibility to add labels,\njust a different name for tags, and colors to notes. 
That way I can easily\ngroup my project notes together and even find the ones I am looking for quickly\nin my main view.\n\n## Visual Studio Code – lightweight editing with Markdown preview and Git support\n\nTo get my basic notes from Google Keep into GitLab, I used [Visual Studio Code](https://code.visualstudio.com/).\nIt is a simple editor with many useful plugins, making editing and checking\nMarkdown documents very convenient and supporting Git out of the box, which was\npretty much all I needed.\n\nOften, my Google Keep notes require just a little bit of cleanup, before they\nare ready to be committed to the Git repository.\nAs I use GitLab milestones and issues to structure all the work, I could also\ntake advantage of this when adding documents to the Git repository and making\nchanges. So I also reference the relevant issues in my commit messages using\n[GitLab Flavored Markdown](https://docs.gitlab.com/ee/user/markdown.html#gitlab-flavored-markdown-gfm) syntax.\n\nNext on my todo list was to [create a simple pre-launch website](https://medium.com/unscrewme/claiming-the-name-257b59d979b)\nto announce the new service, even before it was built. I did read a few times\nthat building a pre-launch website before starting to work on the application\ncode can help to gauge if there even is enough interest for the product. In my\ncase, I was not too concerned about this aspect, since first and foremost, I\nwanted to use my service, therefore by definition it would be worth the effort.\n\n*(I began writing this overview at [Pantry Marylebone](https://www.pantrymarylebone.com/)\nand finished it there too, a few days later. 
I wrote the final paragraphs there\nafter having had three wines at [108 Brasserie](http://108brasserie.com/) before:\na beautiful and well-balanced 2016 Picpoul de Pinet from Domaine Felines Jourdan\nin Languedoc in France, a surprisingly light and smooth 2016 Montepulciano\nd’Abruzzo from Il Faggio in Italy and a somewhat harsh and slightly disappointing\n2016 Beaujolais Vieilles Vignes par Vincent Fontaine from Domaine de la Rocailler, in France.)*\n\n## About the Guest Author\n\nGoetz Buerkle is currently working to launch UnscrewMe. There are so many wine\ntastings happening in London every day – UnscrewMe wants to help Londoners spend\nless time searching for wine events and more time tasting interesting wines\ninstead. [Keep up with the project](http://unscrewme.co.uk/).\n\n\n*[An Agile approach to documentation and structure](https://medium.com/unscrewme/an-agile-approach-to-documentation-and-structure-5fe4a14a6f2f) was originally published on Medium.*\n","open-source",[696,697,9],"agile","open source",{"slug":699,"featured":6,"template":700},"an-agile-approach-to-documentation-and-structure","BlogPost","content:en-us:blog:an-agile-approach-to-documentation-and-structure.yml","An Agile Approach To Documentation And Structure","en-us/blog/an-agile-approach-to-documentation-and-structure.yml","en-us/blog/an-agile-approach-to-documentation-and-structure",{"_path":706,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":707,"content":713,"config":722,"_id":724,"_type":14,"title":725,"_source":16,"_file":726,"_stem":727,"_extension":19},"/en-us/blog/arctic-engine-fuzz-testing-blog",{"title":708,"description":709,"ogTitle":708,"ogDescription":709,"noIndex":6,"ogImage":710,"ogUrl":711,"ogSiteName":686,"ogType":687,"canonicalUrls":711,"schema":712},"How Arctic Engine uses GitLab's fuzz testing","Using GitLab's fuzz testing, we discovered and fixed various real defects that could crash our software. 
Now we can detect vulnerabilities before merging the code.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681504/Blog/Hero%20Images/arcticengine.png","https://about.gitlab.com/blog/arctic-engine-fuzz-testing-blog","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Arctic Engine uses GitLab's fuzz testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Huldra\"}],\n        \"datePublished\": \"2020-08-19\",\n      }",{"title":708,"description":709,"authors":714,"heroImage":710,"date":716,"body":717,"category":718,"tags":719},[715],"Huldra","2020-08-19","{::options parse_block_html=\"true\" /}\n\n\n\n\n## About Arctic Engine\n\n\n[Arctic Engine](https://gitlab.com/huldra/arctic) is an open-source, free\ngame\n\nengine released under the [MIT\nlicense](https://opensource.org/licenses/MIT).\n\nArctic Engine is implemented in C++ and focuses on simplicity. Being a C++\n\nprogrammer and making games should not be joyless, disillusioning, and\n\ndiscouraging. In the '80s and '90s, a programmer could make games alone, and\n\nit was fun. Arctic Engine aims at making game development in C++ fun again.\n\n\n## Testing can be fun\n\n\nTesting the game engine is very important since games are usually no more\n\nrobust and performant than the underlying middleware or game engine. Writing\n\ntests by hand is time-consuming and disillusioning, and it may drain the fun\n\nfrom the development process. So, to my shame, I avoided writing tests in\nevery\n\nway I could. For instance, I used static analyzers to detect bugs. The\nproblem\n\nwith static analyzers was the lack of motivation to fix potential issues.\nYou\n\nmay be unsure whether a bug is really there, and it can sometimes be hard to\n\nfind a way to trigger it.\n\n\nThe other possibility was fuzz testing. 
I heard about fuzzing but didn't try\nit\n\nearlier because I thought it was hard to integrate with the project. I could\n\nnot be more wrong. It's amazing how little effort it takes to get fuzz\ntesting\n\nup and running with GitLab.\n\n\n## Fuzz testing and what it exposed\n\n\nThanks to [Sam Kerr](https://gitlab.com/stkerr) for proving me wrong about\n\nfuzzing by [actually\nfuzzing](https://gitlab.com/huldra/arctic/-/commit/946382569d88c3af7f4a7ea075c3c3cb18d3b06b)\n\nthe sound loader code. Arctic Engine allows loading a sound from a WAV file\nin\n\nmemory. To fuzz the loader's code, you create a small CPP file with a single\n\nfunction like this:\n\n\n```cpp\n\nextern \"C\" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) {\n    std::shared_ptr\u003Carctic::SoundInstance> result = arctic::LoadWav(data, size);\n    return 0;\n}\n\n```\n\n\nThen you add ``-fsanitize=fuzzer`` flag to the CMakeLists.txt file and a few\n\nlines to the `.gitlab-ci.yml` file, and the fuzzing begins! You may want to\n\ndrop in a few WAV files to the corpus folder to help the fuzzer and speed up\n\nthe process, but that's optional. Ok, it was a little harder than that with\nthe\n\nArctic Engine because it would output a message and quit upon processing\n\nunsupported file formats. Still, handling file loading errors this way was a\n\nbad idea, and I finally had a reason to fix it.\n\n\nThe fuzzer started crashing Arctic Engine: first, it triggered a signed\ninteger\n\noverflow, a division by zero, and a buffer overrun. And then, the wave\nloader\n\ngot out-of-memory while trying to resample a tiny WAV file with a sampling\nrate\n\nof 1 sample per second to 44100 samples per second. Wow.\n\n\nWhat I liked about fuzzing is that fuzzer actually crashes your program and\n\nprovides you the input so you can reproduce the crash. And once you've set\nup\n\nthe test harness, the entire testing process is fully automated, saving you\n\ntime and effort. 
It's like having a personal QA team, you commit your code,\nand\n\nin a few minutes, you already have it tests-covered.\n\n\nThen I fuzzed the CSV and the TGA file parsers and expected to find some\nbugs\n\nin the CSV and none in the TGA. What can I say? You may not find bugs where\nyou\n\nexpect them to be and find bugs where you thought there were none. The TGA\n\nloader crashed immediately with a buffer overrun. It did not account for\nfiles\n\ncontaining only a valid header but no actual image data after it.\n\n\n## Plans\n\n\nI will add a simple HTTP web server and some multiplayer network interaction\n\ncode to the Arctic Engine. I was putting it off for quite a while now\nbecause I\n\nthought testing would be a pain. Now that I know how easy it is to apply\n\nGitLab's fuzz testing to any data processing code, I'm very optimistic and\n\nsomewhat challenged. Like \"Can I make it withstand the fuzzer from the first\ntry?\".\n\nIt makes writing code fun for me once again.\n\n\n## Further reading\n\n\n- [GitLab's coverage-guided fuzz testing\ndocumentation](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/#coverage-guided-fuzz-testing)\n\n- [GitLab's Fuzzing 101\nplaylist](https://www.youtube.com/playlist?list=PL05JrBw4t0KoYzW1CR-g1rMc9Xgmnhjfe)\n\n\n### About the guest author\n\n\nHuldra is a senior videogame programmer by day maintainer of the [Arctic\nEngine](https://gitlab.com/huldra/arctic) by night. 
She started it because\nshe wanted a game engine that kept simple things simple and made complex\nthings possible.\n","unfiltered",[109,697,720,721,9,721],"security","testing",{"slug":723,"featured":6,"template":700},"arctic-engine-fuzz-testing-blog","content:en-us:blog:arctic-engine-fuzz-testing-blog.yml","Arctic Engine Fuzz Testing Blog","en-us/blog/arctic-engine-fuzz-testing-blog.yml","en-us/blog/arctic-engine-fuzz-testing-blog",{"_path":729,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":730,"content":736,"config":744,"_id":746,"_type":14,"title":747,"_source":16,"_file":748,"_stem":749,"_extension":19},"/en-us/blog/automating-boring-git-operations-gitlab-ci",{"title":731,"description":732,"ogTitle":731,"ogDescription":732,"noIndex":6,"ogImage":733,"ogUrl":734,"ogSiteName":686,"ogType":687,"canonicalUrls":734,"schema":735},"GitBot – automating boring Git operations with CI","Guest author Kristian Larsson shares how he automates some common Git operations, like rebase, using GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672374/Blog/Hero%20Images/gitbot-automate-git-operations.jpg","https://about.gitlab.com/blog/automating-boring-git-operations-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitBot – automating boring Git operations with CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kristian Larsson\"}],\n        \"datePublished\": \"2017-11-02\",\n      }",{"title":731,"description":732,"authors":737,"heroImage":733,"date":739,"body":740,"category":741,"tags":742},[738],"Kristian Larsson","2017-11-02","Git is super useful for anyone doing a bit of development work or just\ntrying to\n\nkeep track of a bunch of text files. 
However, as your project grows you\nmight\n\nfind yourself doing lots of boring repetitive work just around Git itself.\nAt\n\nleast that’s what happened to me and so I automated some boring Git stuff\nusing our\n\n[continuous integration (CI) system](/solutions/continuous-integration/).\n\n\n\u003C!-- more -->\n\n\nThere are probably all sorts of use cases for automating various Git\noperations\n\nbut I’ll talk about a few that I’ve encountered. We’re using GitLab and\n[GitLab\n\nCI](/solutions/continuous-integration/) so that’s what my examples\n\nwill include, but most of the concepts should apply to other systems as\nwell.\n\n\n## Automatic rebase\n\n\nWe have some Git repos with source code that we receive from vendors, who we\ncan think\n\nof as our `upstream`. We don’t actually share a Git repo with the vendor but\n\nrather we get a tar ball every now and then. The tar ball is extracted into\na\n\nGit repository, on the `master` branch which thus tracks the software as it\nis\n\nreceived from upstream. In a perfect world the software we receive would be\n\nfeature complete and bug free and so we would be done, but that’s usually\nnot\n\nthe case. We do find bugs and if they are blocking we might decide to\nimplement\n\na patch to fix them ourselves. The same is true for new features where we\nmight\n\nnot want to wait for the vendor to implement it.\n\n\nThe result is that we have some local patches to apply. We commit such\npatches\n\nto a separate branch, commonly named `ts` (for TeraStream), to keep them\n\nseparate from the official software. Whenever a new software version is\nreleased,\n\nwe extract its content to `master` and then rebase our `ts` branch onto\n`master`\n\nso we get all the new official features together with our patches. 
Once\nwe’ve\n\nimplemented something we usually send it upstream to the vendor for\ninclusion.\n\nSometimes they include our patches verbatim so that the next version of the\ncode\n\nwill include our exact patch, in which case a rebase will simply skip our\npatch.\n\nOther times there are slight or major (it might be a completely different\ndesign)\n\nchanges to the patch and then someone typically needs to sort out the\npatches\n\nmanually. Mostly though, rebasing works just fine and we don’t end up with\nconflicts.\n\n\nNow, this whole rebasing process gets a tad boring and repetitive after a\nwhile,\n\nespecially considering we have a dozen of repositories with the setup\ndescribed\n\nabove. What I recently did was to automate this using our CI system.\n\n\nThe workflow thus looks like:\n\n\n- human extracts zip file, git add + git commit on master + git push\n\n- CI runs for `master` branch\n   - clones a copy of itself into a new working directory\n   - checks out `ts` branch (the one with our patches) in working directory\n   - rebases `ts` onto `master`\n   - push `ts` back to `origin`\n- this event will now trigger a CI build for the `ts` branch\n\n- when CI runs for the `ts` branch, it will compile, test and save the\nbinary output as “build artifacts”, which can be included in other\nrepositories\n\n- GitLab CI, which is what we use, has a CI_PIPELINE_ID that we use to\nversion built container images or artifacts\n\n\nTo do this, all you need is a few lines in a .gitlab-ci.yml file,\nessentially;\n\n\n```\n\nstages:\n  - build\n  - git-robot\n\n... build jobs ...\n\n\ngit-rebase-ts:\n  stage: git-robot\n  only:\n    - master\n  allow_failure: true\n  before_script:\n    - 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'\n    - eval $(ssh-agent -s)\n    - ssh-add \u003C(echo \"$GIT_SSH_PRIV_KEY\")\n    - git config --global user.email \"kll@dev.terastrm.net\"\n    - git config --global user.name \"Mr. 
Robot\"\n    - mkdir -p ~/.ssh\n    - cat gitlab-known-hosts >> ~/.ssh/known_hosts\n  script:\n    - git clone git@gitlab.dev.terastrm.net:${CI_PROJECT_PATH}.git\n    - cd ${CI_PROJECT_NAME}\n    - git checkout ts\n    - git rebase master\n    - git push --force origin ts\n  ```\n\nWe’ll go through the Yaml file a few lines at a time. Some basic knowledge\nabout GitLab CI is assumed.\n\n\nThis first part lists the stages of our pipeline.\n\n\n```\n\nstages:\n  - build\n  - git-robot\n  ```\n\nWe have two stages, first the `build` stage, which does whatever you want it\nto\n\ndo (ours compiles stuff, runs a few unit tests and packages it all up), then\nthe\n\n`git-robot` stage which is where we perform the rebase.\n\n\nThen there’s:\n\n\n```\n\ngit-rebase-ts:\n  stage: git-robot\n  only:\n    - master\n  allow_failure: true\n  ```\n\nWe define the stage in which we run followed by the only statement which\nlimits\n\nCI jobs to run only on the specified branch(es), in this case `master`.\n\n\n`allow_failure` simply allows the CI job to fail but still passing the\npipeline.\n\n\nSince we are going to clone a copy of ourselves (the repository checked out\nin\n\nCI) we need SSH and SSH keys set up. We’ll use ssh-agent with a\npassword-less key\n\nto authenticate. Generate a key using ssh-keygen, for example:\n\n\n```\n\nssh-keygen\n\n\nkll@machine ~ $ ssh-keygen -f foo\n\nGenerating public/private rsa key pair.\n\nEnter passphrase (empty for no passphrase):\n\nEnter same passphrase again:\n\nYour identification has been saved in foo.\n\nYour public key has been saved in foo.pub.\n\nThe key fingerprint is:\n\nSHA256:6s15MZJ1/kUsDU/PF2WwRGA963m6ZSwHvEJJdsRzmaA kll@machine\n\nThe key's randomart image is:\n\n+---[RSA 2048]----+\n\n|            o**.*|\n\n|           ..o**o|\n\n|           Eo o%o|\n\n|          .o.+o O|\n\n|        So oo.o+.|\n\n|       .o o.. o+o|\n\n|      .  . o..o+=|\n\n|     . o ..  .o= |\n\n|      . +.    .. 
|\n\n+----[SHA256]-----+\n\nkll@machine ~ $\n\n```\n\n\nAdd the public key as a deploy key under Project Settings\n\n\u003Ci class=\"fas fa-arrow-right\" aria-hidden=\"true\">\u003C/i> Repository \u003Ci\nclass=\"fas fa-arrow-right\" aria-hidden=\"true\">\u003C/i>\n\nDeploy Keys. Make sure you enable write access or you won’t be able to have\nyour\n\nGit robot push commits. We then need to hand over the private key so that it\ncan\n\nbe accessed from within the CI job. We’ll use a secret environment variable\nfor\n\nthat, which you can define under Project Settings\n\n\u003Ci class=\"fas fa-arrow-right\" aria-hidden=\"true\">\u003C/i> Pipelines \u003Ci\nclass=\"fas fa-arrow-right\" aria-hidden=\"true\">\u003C/i>\n\nEnvironment variables). I’ll use the environment variable GIT_SSH_PRIV_KEY\nfor this.\n\n\nNext part is the before_script:\n\n\n```\n  before_script:\n    - 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'\n    - eval $(ssh-agent -s)\n    - ssh-add \u003C(echo \"$GIT_SSH_PRIV_KEY\")\n    - git config --global user.email \"kll@dev.terastrm.net\"\n    - git config --global user.name \"Mr. Robot\"\n    - mkdir -p ~/.ssh\n    - cat gitlab-known-hosts >> ~/.ssh/known_hosts\n  ```\n\nFirst ssh-agent is installed if it isn’t already. We then start up ssh-agent\nand\n\nadd the key stored in the environment variable GIT_SSH_PRIV_KEY (which we\nset up\n\npreviously). The Git user information is set and we finally create .ssh and\nadd\n\nthe known host information about our GitLab server to our known_hosts file.\nYou\n\ncan generate the gitlab-known-hosts file using the following command:\n\n\n```\n\nssh-keyscan my-gitlab-machine >> gitlab-known-hosts\n\n```\n\n\nAs the name implies, the before_script is run before the main `script` part\nand\n\nthe ssh-agent we started in the before_script will also continue to run for\nthe\n\nduration of the job. 
The ssh-agent information is stored in some environment\n\nvariables which are carried across from the before_script into the main\nscript,\n\nenabling it to work. It’s also possible to put this SSH setup in the main\nscript,\n\nI just thought it looked cleaner splitting it up between before_script and\nscript.\n\nNote however that it appears that after_script behaves differently so while\nit’s\n\npossible to pass environment vars from before_script to script, they do not\n\nappear to be passed to after_script. Thus, if you want to do Git magic in\nthe\n\nafter_script you also need to perform the SSH setup in the after_script.\n\n\nThis brings us to the main script. In GitLab CI we already have a\nchecked-out\n\nclone of our project but that was automatically checked out by the CI system\n\nthrough the use of magic (it actually happens in a container previous to the\none\n\nwe are operating in, that has some special credentials) so we can’t really\nuse\n\nit, besides, checking out other branches and stuff would be really weird as\nit\n\ndisrupts the code we are using to do this, since that’s available in the Git\n\nrepository that’s checked out. It’s all rather meta.\n\n\nAnyway, we’ll be checking out a new Git repository where we’ll do our work,\nthen\n\nchange the current directory to the newly checked-out repository, after\nwhich\n\nwe’ll check out the `ts` branch, do the rebase and push it back to the\norigin remote.\n\n\n```\n    - git clone git@gitlab.dev.terastrm.net:${CI_PROJECT_PATH}.git\n    - cd ${CI_PROJECT_NAME}\n    - git checkout ts\n    - git rebase master\n    - git push --force origin ts\n  ```\n\n… and that’s it. We’ve now automated the rebasing of a branch in our config\nfile. 
Occasionally it\n\nwill fail due to problems rebasing (most commonly merge conflicts) but then\nyou\n\ncan just step in and do the above steps manually and be interactively\nprompted\n\non how to handle conflicts.\n\n\n## Automatic merge requests\n\n\nAll the repositories I mentioned in the previous section are NEDs, a form of\n\ndriver for how to communicate with a certain type of device, for Cisco NSO\n(a\n\nnetwork orchestration system). We package up Cisco NSO, together with these\nNEDs\n\nand our own service code, in a container image. The build of that image is\n\nperformed in CI and we use a repository called `nso-ts` to control that\nwork.\n\n\nThe NEDs are compiled in CI from their own repository and the binaries are\nsaved\n\nas build artifacts. Those artifacts can then be pulled in the CI build of\n`nso-ts`.\n\nThe reference to which artifact to include is the name of the NED as well as\nthe\n\nbuild version. The version number of the NED is nothing more than the\npipeline\n\nid (which you’ll access in CI as ${CI_PIPELINE_ID}) and by including a\nspecific\n\nversion of the NED, rather than just use “latest” we gain a much more\nconsistent\n\nand reproducible build.\n\n\nWhenever a NED is updated a new build is run that produces new binary\nartifacts.\n\nWe probably want to use the new version but not before we test it out in CI.\nThe\n\nactual versions of NEDs to use is stored in a file in the `nso-ts`\nrepository and\n\nfollows a simple format, like this:\n\n\n```\n\nned-iosxr-yang=1234\n\nned-junos-yang=4567\n\n...\n\n```\n\n\nThus, updating the version to use is a simple job to just rewrite this text\nfile\n\nand replace the version number with a given CI_PIPELINE_ID version number.\nAgain,\n\nwhile NED updates are more seldom than updates to `nso-ts`, they do occur\nand\n\nhandling it is bloody boring. 
Enter automation!\n\n\n```\n\ngit-open-mr:\n  image: gitlab.dev.terastrm.net:4567/terastream/cisco-nso/ci-cisco-nso:4.2.3\n  stage: git-robot\n  only:\n    - ts\n  tags:\n    - no-docker\n  allow_failure: true\n  before_script:\n    - 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'\n    - eval $(ssh-agent -s)\n    - ssh-add \u003C(echo \"$GIT_SSH_PRIV_KEY\")\n    - git config --global user.email \"kll@dev.terastrm.net\"\n    - git config --global user.name \"Mr. Robot\"\n    - mkdir -p ~/.ssh\n    - cat gitlab-known-hosts >> ~/.ssh/known_hosts\n  script:\n    - git clone git@gitlab.dev.terastrm.net:TeraStream/nso-ts.git\n    - cd nso-ts\n    - git checkout -b robot-update-${CI_PROJECT_NAME}-${CI_PIPELINE_ID}\n    - for LIST_FILE in $(ls ../ned-package-list.* | xargs -n1 basename); do NED_BUILD=$(cat ../${LIST_FILE}); sed -i packages/${LIST_FILE} -e \"s/^${CI_PROJECT_NAME}.*/${CI_PROJECT_NAME}=${NED_BUILD}/\"; done\n    - git diff\n    - git commit -a -m \"Use ${CI_PROJECT_NAME} artifacts from pipeline ${CI_PIPELINE_ID}\"\n    - git push origin robot-update-${CI_PROJECT_NAME}-${CI_PIPELINE_ID}\n    - HOST=${CI_PROJECT_URL} CI_COMMIT_REF_NAME=robot-update-${CI_PROJECT_NAME}-${CI_PIPELINE_ID} CI_PROJECT_NAME=TeraStream/nso-ts GITLAB_USER_ID=${GITLAB_USER_ID} PRIVATE_TOKEN=${PRIVATE_TOKEN} ../open-mr.sh\n```\n\n\nSo this time around we check out a Git repository into a separate working\n\ndirectory again, it’s just that it’s not the same Git repository as we are\n\nrunning on simply because we are trying to do changes to a repository that\nis\n\nusing the output of the repository we are running on. It doesn’t make much\nof a\n\ndifference in terms of our process. 
At the end, once we’ve modified the\nfiles we\n\nare interested in, we also open up a merge request on the target repository.\n\nHere we can see the MR (which is merged already) to use a new version of the\n\nNED `ned-snabbaftr-yang`.\n\n\n\u003Cimg src=\"/images/blogimages/gitbot-ned-update-mr.png\" alt=\"MR using new\nversion of NED\" style=\"width: 700px;\"/>{: .shadow}\n\n\nWhat we end up with is that whenever there is a new version of a NED, a\nsingle merge\n\nrequest is opened on our `nso-ts` repository to start using the new NED.\nThat\n\nmerge request is using changes on a new branch and CI will obviously run for\n\n`nso-ts` on this new branch, which will then test all of our code using the\nnew\n\nversion of the NED. We get a form of version pinning, with the form of\nexplicit\n\nchanges that it entails, yet it’s a rather convenient and non-cumbersome\n\nenvironment to work with thanks to all the automation.\n\n\n## Getting fancy\n\n\nWhile automatically opening an MR is sweet… we can do ~~better~~fancier. Our\n`nso-ts`\n\nrepository is based on Cisco NSO (Tail-F NCS), or actually the `nso-ts`\nDocker\n\nimage is based on a `cisco-nso` Docker image that we build in a separate\n\nrepository. We put the version of NSO as the tag of the `cisco-nso` Docker\n\nimage, so `cisco-nso:4.2.3` means Cisco NSO 4.2.3. This is what the `nso-ts`\n\nDockerfile will use in its `FROM` line.\n\n\nUpgrading to a new version of NCS is thus just a matter of rewriting the\ntag…\n\nbut what version of NCS should we use? There’s 4.2.4, 4.3.3, 4.4.2 and 4.4.3\n\navailable and I’m sure there’s some other version that will pop up its evil\n\nhead soon enough. How do I know which version to pick? 
And will our current\ncode\n\nwork with the new version?\n\n\nTo help myself in the choice of NCS version I implemented a script that gets\nthe\n\nREADME file of a new NCS version and cross references the list of fixed\nissues\n\nwith the issues that we currently have open in the Tail-F issue tracker. The\n\noutput of this is included in the merge request description so when I look\nat\n\nthe merge request I immediately know what bugs are fixed or new features are\n\nimplemented by moving to a specific version. Having this automatically\ngenerated\n\nfor us is… well, it’s just damn convenient. Together with actually testing\nour\n\ncode with the new version of NCS gives us confidence that an upgrade will be\nsmooth.\n\n\nHere are the merge requests currently opened by our GitBot:\n\n\n\u003Cimg src=\"/images/blogimages/automate-git-merge-requests.png\" alt=\"Merge\nrequests automated by Git bot\" style=\"width: 700px;\"/>{: .shadow}\n\n\nWe can see how the system have generated MRs to move to all the different\n\nversions of NSO currently available. As we are currently on NSO v4.2.3\nthere’s\n\nno underlying branch for that one leading to an errored build. 
For the other\n\nversions though, there is a branch per version that executes the CI pipeline\nto\n\nmake sure all our code runs with this version of NSO.\n\n\nAs there have been a few commits today, these branches are behind by six\ncommits\n\nbut will be rebased this night so we get an up-to-date picture if they work\nor\n\nnot with our latest code.\n\n\n\u003Cimg src=\"/images/blogimages/automate-git-commits.png\" alt=\"Commits\"\nstyle=\"width: 700px;\"/>{: .shadow}\n\n\nIf we go back and look at one of these merge requests, we can see how the\n\ndescription includes information about what issues that we currently have\nopen\n\nwith Cisco / Tail-F would be solved by moving to this version.\n\n\n\u003Cimg src=\"/images/blogimages/automate-git-mr-description.png\" alt=\"Merge\nrequest descriptions\" style=\"width: 700px;\"/>{: .shadow}\n\n\nThis is from v4.2.4 and as we are currently on v4.2.3 we can see that there\nare\n\nonly a few fixed issues.\n\n\nIf we instead look at v4.4.3 we can see that the list is significantly\nlonger.\n\n\n\u003Cimg src=\"/images/blogimages/automate-git-mr-description-list.png\"\nalt=\"Merge request descriptions\" style=\"width: 700px;\"/>{: .shadow}\n\n\nPretty sweet, huh? :)\n\n\nAs this involves a bit more code I’ve put the relevant files in a [GitHub\ngist](https://gist.github.com/plajjan/42592665afd5ae045ee36220e19919aa).\n\n\n## This is the end\n\n\nIf you are reading this, chances are you already have your reasons for why\nyou\n\nwant to automate some Git operations. 
Hopefully I’ve provided some\ninspiration\n\nfor how to do it.\n\n\nIf not or if you just want to discuss the topic in general or have more\nspecific\n\nquestions about our setup, please do reach out to me on\n[Twitter](https://twitter.com/plajjan).\n\n\n_[This post](http://plajjan.github.io/automating-git/) was originally\npublished on [plajjan.github.io](http://plajjan.github.io/)._\n\n\n## About the Guest Author\n\n\nKristian Larsson is a network automation systems architect at Deutsche\nTelekom.\n\nHe is working on automating virtually all aspects of running TeraStream, the\n\ndesign for Deutsche Telekom's next generation fixed network, using robust\nand\n\nfault tolerant software. He is active in the IETF as well as being a\n\nrepresenting member in OpenConfig. Previous to joining Deutsche Telekom,\n\nKristian was the IP & opto network architect for Tele2's international\nbackbone\n\nnetwork.\n\n\n\"[BB-8 in action](https://unsplash.com/photos/C8VWyZhcIIU) by [Joseph\nChan](https://unsplash.com/@yulokchan) on Unsplash\n\n{: .note}\n","engineering",[109,9,743],"git",{"slug":745,"featured":6,"template":700},"automating-boring-git-operations-gitlab-ci","content:en-us:blog:automating-boring-git-operations-gitlab-ci.yml","Automating Boring Git Operations Gitlab Ci","en-us/blog/automating-boring-git-operations-gitlab-ci.yml","en-us/blog/automating-boring-git-operations-gitlab-ci",{"_path":751,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":752,"content":758,"config":764,"_id":766,"_type":14,"title":767,"_source":16,"_file":768,"_stem":769,"_extension":19},"/en-us/blog/autoscale-ci-runners",{"title":753,"description":754,"ogTitle":753,"ogDescription":754,"noIndex":6,"ogImage":755,"ogUrl":756,"ogSiteName":686,"ogType":687,"canonicalUrls":756,"schema":757},"Autoscale GitLab CI/CD runners and save 90% on EC2 costs","Guest author Max Woolf shows how his team makes big savings with an autoscaling cluster of GitLab CI/CD 
runners.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680305/Blog/Hero%20Images/autoscale-gitlab-ci-runners.jpg","https://about.gitlab.com/blog/autoscale-ci-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Autoscale GitLab CI/CD runners and save 90% on EC2 costs\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Max Woolf\"}],\n        \"datePublished\": \"2017-11-23\",\n      }",{"title":753,"description":754,"authors":759,"heroImage":755,"date":761,"body":762,"category":741,"tags":763},[760],"Max Woolf","2017-11-23","At [Substrakt Health](https://substrakthealth.com/), we use continuous\nintegration workers to test our software every time new code is written and\npushed, but that computing capacity can be expensive and hard to predict.\nThis tutorial shows you how to set up an autoscaling [cluster of GitLab\nCI/CD](/topics/ci-cd/) runners using docker-machine and AWS.\n\n\n\u003C!-- more -->\n\n\nCode quality is **always** a top priority for us. We want to know that our\ncode works every time and when it stops working we want to know immediately.\nWe use [GitLab CI/CD](/solutions/continuous-integration/) to run our tests\nevery time we push new code and before every deployment. GitLab CI/CD lets\nus split this work across multiple servers and scale up and down capacity as\nrequired to keep costs down for us. This tutorial will show you how to set\nup an autoscaling CI/CD cluster for GitLab and save up to 90 percent on\ncosts using AWS EC2 Spot Instances.\n\n\nGitLab CI/CD allows us to split our jobs across multiple machines. 
By\ndefault, each new worker node requires some setup work to provision and\nattach it to our GitLab instance, but we can also use the autoscaling mode\nto provision a single machine and let that machine decide how much capacity\nis required and then spin up or down further instances as required.\n\n\n>**A warning**: This tutorial will not be covered entirely by the AWS free\nusage tier. It’s going to cost money to try this out.\n\n\n## Creating the spawner\n\n\nFirst off, we need a spawner machine. This runs 24/7 and checks that GitLab\nCI/CD has enough capacity to run the jobs currently in the queue. **It\ndoesn’t run any jobs itself.**\n\n\nWe use Ubuntu 16.04 LTS for our internal tooling, so just create an EC2\ninstance (*t2.micro* is enough and is included in the free tier.) Setting up\nVPCs and related subnets is out of the scope of this article, we’ll assume\nthat you’re working in the default VPC. Then we need to install a bunch of\nsoftware on our machine to set it up.\n\n\n## Installing gitlab-runner\n\n\ngitlab-runner is the main software we need to complete this task. Installing\nit on Ubuntu is really easy.\n\n\n```\n\ncurl -L\nhttps://packages.gitlab.com/install/repositories/runner/gitlab-ci-multi-runner/script.deb.sh\n| sudo bash\n\n```\n\n\n```\n\nsudo apt-get install gitlab-ci-multi-runner\n\n```\n\n\n\u003Cimg src=\"/images/blogimages/auto-scale-ci-runners-gif.gif\" alt=\"Installing\ngitlab-runner\" style=\"width: 700px;\"/>{: .shadow}\n\n\nOnce you’ve done that, register the runner on your GitLab instance. Do this\nas you normally would with any other GitLab CI/CD runner but choose\n**docker+machine** as the runner. Docker Machine is the software required to\nspin up new virtual machines and install Docker on them.\n\n\n## Installing Docker Machine\n\n\nDocker Machine is a handy bit of software that allows one host running\nDocker to spin up and provision other machines running Docker. 
Installing it\nis even easier:\n\n\n```\n\ncurl -L\nhttps://github.com/docker/machine/releases/download/v0.12.2/docker-machine-`uname\n-s`-`uname -m` >/tmp/docker-machine &&\n\nchmod +x /tmp/docker-machine &&\n\nsudo cp /tmp/docker-machine /usr/local/bin/docker-machine\n\n```\n\n\nThis will install the docker-machine binary in your PATH.\n\n\n## Configuring gitlab-runner\n\n\nBy default, gitlab-runner will not work in the autoscaling mode we want.\nIt’ll just run a job by default and then stop. We want to configure this\nmachine to no longer run tests but to spin up new Docker Machines as and\nwhen necessary. Open your gitlab-runner config file, usually found in\n`/etc/gitlab-runner/config.toml` and make some changes. This is our example\n(with sensitive information removed). Let’s go through some of the important\nlines.\n\n\n```\n\nconcurrent = 12\n\ncheck_interval = 0\n\n\n[[runners]]\n  name = \"aws-gitlab-runner-spawner\"\n  limit = 6\n  url = \"https://git.substrakt.com/ci\"\n  token = \"xxxxx\"\n  executor = \"docker+machine\"\n  [runners.docker]\n    tls_verify = false\n    image = \"ruby:2.3.1\"\n    privileged = true\n    disable_cache = false\n    volumes = [\"/cache\"]\n    shm_size = 0\n  [runners.machine]\n    IdleCount = 0\n    MachineDriver = \"amazonec2\"\n    MachineName = \"runner-%s\"\n    MachineOptions = [\"amazonec2-access-key=XXXX\", \"amazonec2-secret-key=XXXX\", \"amazonec2-ssh-user=ubuntu\", \"amazonec2-region=eu-west-2\", \"amazonec2-instance-type=m4.xlarge\", \"amazonec2-ami=ami-996372fd\", \"amazonec2-vpc-id=vpc-xxxxx\", \"amazonec2-subnet-id=subnet-xxxxx\", \"amazonec2-zone=a\", \"amazonec2-root-size=32\", \"amazonec2-request-spot-instance=true\", \"amazonec2-spot-price=0.03\"]\n    IdleTime = 1800\n```\n\n\n```\n\nconcurrent = 12\n\n```\n\n\nThis tells GitLab CI/CD that in total, it should attempt to run 12 jobs\nsimultaneously across all child workers.\n\n\n```\n\nlimit = 6\n\n```\n\n\nThis tells GitLab CI/CD that in total, it 
should use for running jobs a\nmaximum of six worker nodes. You’ll need to tweak this value depending on\nthe resources your jobs need and the resources of your child nodes. There’s\nno right answer here but generally we found it wasn’t a good idea to have\nmore than the number of CPUs – 1 of jobs running per node but again this is\na bit of a ‘finger-in-the-air’ calculation as it really depends on your tech\nstack.\n\n\n```\n\nIdleCount = 0\n\n```\n\n\nThis tells GitLab CI/CD not to run any machines constantly (whilst idle).\nThis means when nobody is running a job, or no jobs are queued to spin down\nall of the worker nodes after an amount of time (IdleTime at the bottom of\nthe file). We power our nodes down after half an hour of no use. This does\nhave the consequence of there being a short wait when we start our day, but\nit saves us money as we’re not using computing power when it’s not required.\n\n\nIf you're interested in more about how `concurrent`, `limit` and `IdleCount`\nare defining the maximum number of jobs and nodes that will be used, you can\nfind a more detailed description in Runner's autoscale configuration\ndocument: [Autoscaling algorithm and\nparameters](https://docs.gitlab.com/runner/configuration/autoscale.html#autoscaling-algorithm-and-parameters),\n[How parameters generate the upper limit of running\nmachines](https://docs.gitlab.com/runner/configuration/autoscale.html#how-concurrent-limit-and-idlecount-generate-the-upper-limit-of-running-machines).\n\n\n```\n\nMachineOptions = [\"amazonec2-access-key=XXXX\", \"amazonec2-secret-key=XXXX\",\n\"amazonec2-ssh-user=ubuntu\", \"amazonec2-region=eu-west-2\",\n\"amazonec2-instance-type=m4.xlarge\", \"amazonec2-ami=ami-996372fd\",\n\"amazonec2-vpc-id=vpc-xxxxx\", \"amazonec2-subnet-id=subnet-xxxxx\",\n\"amazonec2-zone=a\", \"amazonec2-root-size=32\",\n\"amazonec2-request-spot-instance=true\", \"amazonec2-spot-price=0.03\"]\n\n```\n\n\nThis is where the magic happens. 
This is where we set our options for Docker\nMachine. It defines the size, type and price of our runners. I’ll run\nthrough each of the non-obvious options.\n\n\n```\n\namazonec2-vpc-id=vpc-xxxxx & amazonec2-subnet-id=subnet-xxxxx\n\n```\n\n\nThis is the VPC and associated subnet ID. Generally, you’d want this in your\ndefault VPC in a public subnet. We run our jobs in a private VPC with\ninternal peering connections to other VPCs due to regulatory constraints.\n\n\n```\n\namazonec2-region=eu-west-2\n\n```\n\n\nThis is the AWS region. We run all of our infrastructure in the EU (London)\nregion.\n\n\n```\n\namazonec2-instance-type=m4.xlarge\n\n```\n\n\nThis is the size of the instance we want for each of our runners. This\nsetting can have massive implications on cost and it can be a tricky\nbalancing act. Choose too small and your jobs take forever to run due to a\nlack of resources (more time = more money) but choose too large and you have\nunused compute capacity which costs you money you don’t need to spend.\nAgain, there’s no right answer here, it’s about what works for your\nworkload. We found m4.xlarge works for us.\n\n\n## Save up to 90 percent on EC2 costs using Spot Instances\n\n\nSpot Instances are magic. They allow us to bid for unused capacity in the\nAWS infrastructure and often can mean that EC2 costs can be dramatically\nlower. We’re currently seeing discounts of around 85 percent on our EC2\nbills due to using Spot Instances. Setting them up for use on GitLab CI/CD\nis really easy too. There is (of course) a downside. If our bid price for\nVMs is exceeded, then our instances shut down with only a few minutes\nnotice. But as long as our bid is high enough, this isn’t an issue. Pricing\nin the spot market is insanely complex but in eu-west-2 at least, prices for\nm4.large and xlarge instances appear to have been static for months so a bid\n10-20 percent higher than the current spot price appears to be a safe bet.\nJust keep your eyes peeled. 
The current spot price for an m4.xlarge instance\nis $0.026. We’ve set our maximum price at $0.03 to give us some wiggle room.\nAt time of writing, the on-demand price is $0.232. The numbers speak for\nthemselves.\n\n\n>Note: Spot pricing can vary significantly between instance sizes, regions\nand even availability zones in the same region. This guide assumes that spot\npricing won’t vary massively or that you’ve set a good buffer above the\ncurrent spot price to avoid outages.\n\n\n```\n\namazonec2-request-spot-instance=true & amazonec2-spot-price=0.03\n\n```\n\n\nThis tells GitLab CI/CD that instead of just spawning new EC2 instances at\nfull price, that it should request Spot Instances at the current spot price,\nsetting a maximum bid that it should not exceed per hour, in USD (regardless\nof what currency you’re billed in. We’re billed in GBP, but Spot Instances\nare still calculated in USD.) The maximum bid is whatever you’re comfortable\npaying. We tend to set it close to the on-demand price because we’re looking\nfor any discount. As long as we’re not paying more than we otherwise would,\nit’s fine with us. Your financial constraints may affect your decisions\ndifferently.\n\n\n>Update: From October, AWS will charge in seconds, rather than hours used,\nmaking the potential savings even higher for unused partial hours.\n\n\nWe’d love to see how you get along with this so please let us know. You can\ncontact me max [at] substrakthealth [dot] com. For us, it’s saved us time\nand money and that’s never a bad thing.\n\n\n## About the Guest Author\n\n\nMax Woolf is a Senior Developer at Substrakt Health. 
Based in the UK, they\nuse innovative technology to transform how primary care providers organize\nand deliver care to patients in a sustainable NHS.\n\n\n_[Autoscale GitLab CI runners and save 90% on EC2\ncosts](https://substrakthealth.com/autoscale-gitlab-ci-runners-and-save-90-on-ec2-costs/)\nwas originally published on Substrakt Health's blog._\n\n\nCover image by [Sebastien Gabriel](https://unsplash.com/@sgabriel) on\nUnsplash\n\n{: .note}\n",[9,109],{"slug":765,"featured":6,"template":700},"autoscale-ci-runners","content:en-us:blog:autoscale-ci-runners.yml","Autoscale Ci Runners","en-us/blog/autoscale-ci-runners.yml","en-us/blog/autoscale-ci-runners",{"_path":771,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":772,"content":778,"config":785,"_id":787,"_type":14,"title":788,"_source":16,"_file":789,"_stem":790,"_extension":19},"/en-us/blog/best-practices-for-kubernetes-runners",{"title":773,"description":774,"ogTitle":773,"ogDescription":774,"noIndex":6,"ogImage":775,"ogUrl":776,"ogSiteName":686,"ogType":687,"canonicalUrls":776,"schema":777},"Best practices to keep your Kubernetes runners moving","In a presentation at GitLab Commit San Francisco, a senior software engineer from F5 Networks shares some best practices for working with Kubernetes runners.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681341/Blog/Hero%20Images/trackandfield.jpg","https://about.gitlab.com/blog/best-practices-for-kubernetes-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Best practices to keep your Kubernetes runners moving\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-05-27\",\n      }",{"title":773,"description":774,"authors":779,"heroImage":775,"date":781,"body":782,"category":741,"tags":783},[780],"Sara Kassabian","2020-05-27","Sometimes in software engineering, you have to learn the hard way. 
GitLab CI\nis extremely powerful and flexible, but it’s also easy to make mistakes that\ncould take out a GitLab runner, which can clog up Sidekiq and bring down\nyour entire GitLab instance.\n\n\nLuckily, Sean Smith, senior software engineer for F5 Networks has been\nthrough it, and summarizes some of their learnings in [his talk at GitLab\nCommit San Francisco](https://www.youtube.com/watch?v=Hks5ElUxkP4). In the\npresentation, Sean goes in-depth about a past incident that clogged up F5\nNetwork's GitLab runner, and shares tips on setting limits for Kubernetes\n(K8s) runners.\n\n\nSean is a GitLab administrator for [F5 Networks](https://www.f5.com/), a\ncompany with about 1,800 users worldwide running 7,500 projects each month –\nexcluding forks. That’s roughly 350,000 - 400,000 CI jobs going through the\nK8s runners each month. Until some recent hires, there were only three\nengineers to handle it all.\n\n\nInstead of running a giant GitLab instance on one VM, F5 broke up their\ninstance into seven different servers: Two HA web servers, one Postgres\nserver, Postgres replica, Sidekiq, Gitaly (our Git filesystem), and Redis.\n\n\n## Keep your GitLab runners up and moving\n\n\nF5 uses two types of GitLab runners:\n\n\n*   Kubernetes: About 90% of F5 jobs go through K8s\n\n*   Docker: Docker machine is run on-prem and in the cloud\n\n\n**Why use Docker?** F5 uses Docker to configure cluster networks in\ndifferent jobs as well as for unit testing. Since the Docker machine can run\non-prem and also in the cloud, it’s easy to have a VM dedicated to the job\nthat allows you to manage those Docker images and Docker containers and set\nup your cluster networking topology within Docker, so you can run your tests\nand tear it down afterward without affecting other users. 
This isn’t\nsomething that is really possible in Kubernetes runners.\n\n\nOtherwise, F5 Networks uses Kubernetes, but keeping your K8s up and running\nisn’t necessarily foolproof.\n\n\n### CI jobs can spawn\n\n\nSometimes, a seemingly benign coding error can create unanticipated\nconsequences for your Kubernetes runners.\n\n\nOne time, an F5 Engineer decided to use a GitLab CI job to automatically\nconfigure different settings on various jobs and projects. It made sense to\nconfigure using GitLab CI because the engineer wanted to be able to use [Git\nfor version control](/topics/version-control/). Version control makes it\neasier for the team to iterate on the code transparently. He wrote the code\nto run the job.\n\n\nBut, he didn’t read the fine print in the library he was using. The code he\nwrote looked for the project ID, and if it found the project ID, runs the\npipeline once per hour at the 30-minute mark. The assumption was that if\nthere was already a matching scheduled task, the create function would not\ncreate a duplicate. Unfortunately, this was not the case. The code he ran\ncaused the number of CI jobs to grow exponentially.\n\n\n![The code that clogged the K8s runner with GitLab CI jobs for F5\nNetworks](https://about.gitlab.com/images/blogimages/problemcode.png){:\n.shadow}\n\nThe code that clogged the K8s runner with GitLab CI jobs for F5 Networks.\nCan you see the problem yet?\n\n{: .note.text-center}\n\n\n\"You schedule a job, then next you schedule another job so now you've got\ntwo jobs scheduled, and then you've got four jobs scheduled, and then eight,\nafter 10 iterations, you get around the 1,024 jobs scheduled and after\n1,532,000 jobs, if this was allowed to run for 24 hours, you would end up\nwith 16.7 million jobs being scheduled by the 24th hour,\" says Sean.\n\n\nIn short: Chaos. 
Remember, F5 Networks has a CI pipeline capacity of 350,000\nto 400,000 jobs per month, so 16.7 million jobs in 24 hours could easily\nclog the system, taking down the K8s nodes, as well as GitLab nodes.\n\n\nLuckily, there’s a simple enough fix. First, identify which project is\ncausing the problem, and disable CI on the project so it can’t create any\nnew jobs. Next, kill all the pending jobs by [running this\nsnippet](https://gitlab.com/snippets/1924269).\n\n\n```\n\n# gitlab-rails console\n\np = Project.find_by_full_path('rogue-group/rogue-project')\n\nCi::Pipeline.where(project_id: p.id).where(status: 'pending').each {|p|\np.cancel}\n\nexit\n\n```\n\n\nIt’s really a judgment call whether to kill a running job or not. If a job\nis currently running and is going to take all of 30 seconds then maybe don’t\nbother killing it, but if the job is going to take 30 minutes then consider\nkilling it to free up resources for your users.\n\n\nF5 learned a lesson here and set up a monitoring alert to help ensure the\njob queue doesn’t back up like that again. The Cron job checks to make sure\nF5 is not exceeding a preestablished threshold on the number of jobs in a\npending state. The alert links to a dashboard and also includes the full\nplaybook for how to resolve the problem (because let’s face it, nobody is at\ntheir best when troubleshooting bleary-eyed at 3 a.m.). At first there were\nsome false positives, but now the alerting has been fine-tuned and the\nsystem saved F5 from two outages so far.\n\n\n### Push it to the limit\n\n\nThe fact is, nobody has an unlimited cloud budget, and even if you're\non-prem, resources are even more constrained for users that rely upon\nhardware. Sean says that F5 soon realized that, to meet the needs of all\nusers, sensible limits had to be established so one or two mega-users didn't\ndevour all their resources. 
He has some tips on how to set limits in your\nKubernetes and GitLab runners.\n\n\nWhile some users may be disgruntled that cloud limits exist and are\nenforced, the best method is to keep an open dialogue with users about the\nlimits while recognizing that projects expand and grow over a period of\ntime.\n\n\nFortunately you can set the limits yourself and don’t have to rely on the\ngoodwill of your users to conserve CPU. Kubernetes allows limits by default,\nand GitLab supports K8s request and limits. The K8s scheduler uses requests\nto determine which nodes to run the workload on. Limits will kill a job if\nthe job exceeds the predefined limit – there can be different requests and\nlimits but if requests aren’t specified and limits are, the scheduler will\nuse the limits to determine the request value.\n\n\n[Take a peek at what F5 configured the limits for their Kubernetes GitLab\nrunner](https://gitlab.com/snippets/1926912).\n\n\n```toml\n\nconcurrent = 200\n\nlog_format = \"json\"\n\n[[runners]]\n  name = \"Kubernetes Gitlab Runner\"\n  url = \"https://gitlab.example.com/ci\"\n  token = \"insert token here\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    namespace = \"gitlab-runner\"\n    service-account = \"gitlab-runner-user\"\n    pull_policy = \"always\"\n\n    # build container\n    cpu_limit = \"2\"\n    memory_limit = \"6Gi\"\n\n    # service containers\n    service_cpu_limit = \"1\"\n    service_memory_limit = \"1Gi\"\n\n    # helper container\n    helper_cpu_limit = \"1\"\n    helper_memory_limit = \"1Gi\"\n```\n\n\n\"We have got concurrency of 200 jobs, so it will at max spawn 200 jobs and\nyou'll see that we are limiting the CPU use on the build container to two\nand memory to six gigabytes, and on the helper and service CPU and memory\nlimits, we have one CPU and one gig of memory each,\" says Sean. 
\"And so it\ngives you that flexibility to break it out because generally, you don't\nnecessarily need as much CPU or as much memory on a service that you're\nspending up in your CI job.\"\n\n\n## What comes first: Setting up Kubernetes runners or establishing limits?\n\n\n[DevOps](/topics/devops/) is a data-driven practice, so the idea of setting\nlimits to conserve resources without any underlying data about what users\nare doing can seem counterintuitive. If you’re migrating to Kubernetes\nrunners from a Docker runner or a shell runner, it’s easy enough to\nextrapolate the numbers to establish limits as you set up your Kuberntes\nrunners.\n\n\nIf you’re brand-new to GitLab and GitLab CI, then it’s kind of a shot in the\ndark. Think about your bills and resource constraints: How much memory and\nCPU is available? Is anything else running on your K8s cluster. Chances are,\nyour guesses will be incorrect – but that’s OK.\n\n\nIt might sound obvious, but if you’re running a hosted application on the\nsame K8s cluster as your GitLab CI jobs, don’t set limits based on the\ncapacity of a full K8s cluster. Ideally, you’d have a separate K8s cluster\nfor GitLab CI jobs, but that isn’t always possible.\n\n\n### How F5 Networks did it\n\n\nF5 Networks started with a small team of roughly 50 people and maybe 100\nprojects in GitLab – so setting a limit on K8s wasn’t a major concern until\nthe company and, as a result, projects, started to grow.\n\n\nOnce it came time to set limits to their preexisting K8s runners, the first\nstep was to enable the K8s metric server to monitor how their users consume\nresources. The next step was to determine what users are doing. 
Sean\nrecommends using a tool like Grafana or Prometheus, which has a native\nintegration within GitLab (although, F5 used a tool called K9), to extract\nthe data from the K8s metric server and display it on some sort of dashboard\nusing Grafana or Prometheus.\n\n\n## Some more tips for Kubernetes runners\n\n\n### Cutting them off: Enforcing limits\n\n\nOnce a user hits their limit, most of the time the end result is their job\ngets killed. Usually the user will notice a mistake, go in, and fix their\ncode, but most likely they will just ask for more resources.\n\n\nThe best way to determine whether or not to allocate more of your finite\nresources to a user is to determine need, Sean explains. Ask the user to\nreturn to you with concrete numbers about the amount of RAM or CPU they\nrequire. But if you don’t have the resources, then don’t overextend\nyourselves to the detriment of your other users.\n\n\n### Use labels to reveal more data\n\n\n[Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set)\nmake it easier to identify workloads in Kubernetes, and can be expanded to\nenvironmental variables within GitLab, for example, job = \"$CI_JOB_ID\" and\nproject = \"$CI_PROJECT_ID\". Labels can be used by admins who are manually\ndoing kubectl commands against K8s or they can be used in reporting tools\nlike Prometheus or Grafana for setting limits. But labels are the most\nvaluable when it comes to debugging purposes.\n\n\nBear in mind, labels are finicky in Kubernetes. [There are certain\ncharacters (stay away from \"?\") that can cause jobs to\nfail](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4565). There is a\n63 character limit on labels. If there is an unsupported character or the\nlabel is too long, the job won’t start. There won’t be a really good\nindication as to why your job wouldn’t start either, which can be a pain for\ntroubleshooting. 
[Bookmark this page to learn more about labels in\nKubernetes](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set)\n(including its limitations).\n\n\nGitLab users that run on K8s need to be cautious not to overburden the\nrunner with GitLab CI jobs, and ought to consider setting limits on CPU to\nconserve valuable resources.\n\n\nWant to learn more about how F5 manages their Kubernetes runners on their\nGitLab instance? Watch Sean's presentation at GitLab Commit San Francisco in\nthe video below.\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/Hks5ElUxkP4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## Learn more\n\n\n* [Read on](/solutions/kubernetes/) to learn more about how GitLab and\nKubernetes work together, and explore our plans for future integration with\nKubernetes.\n\n\n* Explore the official documentation on [Kubernetes\nexecutor](https://docs.gitlab.com/runner/executors/kubernetes.html), which\ncovers everything from choosing options in your configuration file to giving\nGitLab Runner access to the Kubernetes API, environment variables, volumes,\nhelper containers, security context, privileged mode, secret volume, and\nremoving old runner pods.\n\n\nCover Photo by [Kolleen\nGladden](https://unsplash.com/@rockthechaos?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/s/photos/track-and-field?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n\n{: .note.text-center}\n",[784,109,9],"kubernetes",{"slug":786,"featured":6,"template":700},"best-practices-for-kubernetes-runners","content:en-us:blog:best-practices-for-kubernetes-runners.yml","Best Practices For Kubernetes 
Runners","en-us/blog/best-practices-for-kubernetes-runners.yml","en-us/blog/best-practices-for-kubernetes-runners",{"_path":792,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":793,"content":799,"config":809,"_id":811,"_type":14,"title":812,"_source":16,"_file":813,"_stem":814,"_extension":19},"/en-us/blog/cern-connect-global-researchers",{"title":794,"description":795,"ogTitle":794,"ogDescription":795,"noIndex":6,"ogImage":796,"ogUrl":797,"ogSiteName":686,"ogType":687,"canonicalUrls":797,"schema":798},"CERN uses GitLab to remove the obstacles around global researchers","Learn how GitLab helps particle physics laboratory CERN manage over 7,000 projects globally","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670719/Blog/Hero%20Images/cern.jpg","https://about.gitlab.com/blog/cern-connect-global-researchers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"CERN uses GitLab to remove the obstacles around global researchers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kim Lock\"}],\n        \"datePublished\": \"2018-10-12\",\n      }",{"title":794,"description":795,"authors":800,"heroImage":796,"date":802,"body":803,"category":804,"tags":805},[801],"Kim Lock","2018-10-12","\n\nCERN is the European Organization for Nuclear Research. Using highly sophisticated\ninstruments, the organization’s physicists and engineers study the fundamental particles\nthat are the building blocks of the universe. This organization was looking for a way to\novercome the challenges associated with managing thousands of projects with numerous contributors\nlocated all around the world.\n\nTo assist with these challenges, the [CERN IT department searched for a streamlined solution](https://about.gitlab.com/customers/cern/) for\ncode review. 
In addition to having the capacity to get a large number of projects and users up and\nrunning quickly, they were also looking for their selection to be easy for those users who are less\nexperienced with Git. GitLab met their requirements and they began utilizing these features.\n\nCERN chose to make the move to GitLab for their code hosting needs approximately three years ago. CERN\nhas long been a strong advocate for open source software, and solutions enabling data sovereignty. GitLab’s\nopen core, self-managed model was attractive to the organization because of these desires.\n\n### Today CERN has more than 12,000 users using GitLab and runs 120,000 CI jobs per month\n\n“It’s clearly a powerful tool to do our operations, code collaboration and record discussions on our\ndevelopment and deployment process. We can do more because we can handle more complex projects. As an\nindividual, I’m able to be involved with several large projects because I can rely on GitLab, and the\nother development tools that we have deployed around GitLab, to keep track of things. This is my perception\nas a GitLab user for three years: it’s not that I can do new things, but I can do more because of the\nefficiency of the tool,” said Alex Lossent, Version Control Systems Service Manager, CERN IT department\n\nThe team at CERN's IT department recently sat down with us to share the details of how GitLab is helping\nthem bridge the gaps of working and communicating in a global workspace. “We have this main analysis code on\nGitLab with millions of lines of code. Each team of physicists also has their own repositories with their\nspecific data analysis. 
And the on-premise nature of GitLab is really useful because we can access other CERN\nservices, data storage and other information that we wouldn’t have on GitHub,” Lukas Heinrich, a particle\nphysicist currently studying at New York University, explained.\n\nYou can learn more about CERN's story and how they are using GitLab in this case study [Particle physics laboratory uses GitLab to connect researchers from across the globe](https://about.gitlab.com/customers/cern/)\n","insights",[9,806,807,808],"code review","collaboration","remote work",{"slug":810,"featured":6,"template":700},"cern-connect-global-researchers","content:en-us:blog:cern-connect-global-researchers.yml","Cern Connect Global Researchers","en-us/blog/cern-connect-global-researchers.yml","en-us/blog/cern-connect-global-researchers",{"_path":816,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":817,"content":823,"config":834,"_id":836,"_type":14,"title":837,"_source":16,"_file":838,"_stem":839,"_extension":19},"/en-us/blog/chris-hill-devops-enterprise-summit-talk",{"title":818,"description":819,"ogTitle":818,"ogDescription":819,"noIndex":6,"ogImage":820,"ogUrl":821,"ogSiteName":686,"ogType":687,"canonicalUrls":821,"schema":822},"How Jaguar Land Rover embraced CI to speed up their software lifecycle","Inspiration, persistence, an attitude of continuous improvement – how adopting CI helped this vehicle company implement software over the air.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667619/Blog/Hero%20Images/chris-hill-jlr-does.jpg","https://about.gitlab.com/blog/chris-hill-devops-enterprise-summit-talk","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Jaguar Land Rover embraced CI to speed up their software lifecycle\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-07-23\"\n      
}",{"title":818,"description":819,"authors":824,"heroImage":820,"date":826,"body":827,"category":828,"tags":829},[825],"Rebecca Dodd","2018-07-23","\n\n[CI/CD](/topics/ci-cd/) gets us pretty excited anyway, but it's not often we get to talk about how it improves something as cool as a luxury car. Chris Hill, Head of Systems Engineering for Infotainment at Jaguar Land Rover, recently shared his own team's journey from feedback loops of 4-6 weeks to just 30 minutes, in this inspiring talk from [DevOps Enterprise](/stages-devops-lifecycle/) Summit London 2018.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/CEvjB-79tOs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Key takeaways from Chris' talk\n\n### What's needed for transformation\n\n\u003Cdiv class=\"panel panel-default twitter-block\"> \u003Ca class=\"twitter-block-link panel-body\" href=\"http://twitter.com/share?text=%22Driving change within an enterprise requires three qualities: inspiration, persistence, and an attitude of continuous improvement.%22 – @chillosuvia via @gitlab&amp;amp;url=https://about.gitlab.com/blog/chris-hill-devops-enterprise-summit-talk/&amp;amp;hashtags=\" rel=\"nofollow\" target=\"_blank\" title=\"Tweet!\"> \u003Cspan class=\"twitter-text pull-left\"> \"Driving change within an enterprise requires three qualities: inspiration, persistence, and an attitude of continuous improvement.\" – @chillosu via @gitlab \u003C/span> \u003Cspan class=\"click-to-tweet\"> Click to tweet! \u003Ci class=\"fab fa-twitter\">\u003C/i> \u003C/span> \u003C/a> \u003C/div>\n\n### How you respond to complaints matters\n\n> \"Equally if not more important than the complaint itself, is the response or reaction to the complaint. 
'Can I bring a complaint, that I know my voice is heard and that somebody cares about resolving my issue?'\"\n\n> \"'I asked the ops team three weeks ago to add a build dependency on the build servers, and it still hasn't been added. I'm just going to go back to building on my own.' This complaint obviously is a knife right to the heart because you feel like you've started to regress. But what I like about this complaint is it led to a behavioral change as well as a technical change. We decided instead of continuing the same direction, to move to ephemeral Docker containers to run all of our builds. With ephemeral Docker containers we defined every piece of build infrastructure as code. We used packer recipes to find a Docker container, and every app developer could now change the underlying infrastructure which built their application. They were empowered. They now had the self service to do their lifecycle on their own. And you're never going to receive the ops complaint because you've handed over the keys.\"\n\n### Efficient feedback loops are critical\n\n> \"Our feedback loops were 4-6 weeks. Could you imagine writing code today and six weeks from now being told whether or not it works or is broken? I don't remember the shirt that I wore yesterday, let alone what I had for breakfast this morning, let alone what I wrote six weeks ago, and chances are I've been working on features for the last six weeks, and for me to try to unpick what I was thinking at that point could be a huge context-switch penalty.\"\n\n> \"Infotainment also had a significantly higher number of contributors – up to 1,000 contributors. And what we noticed is that contributions don't come linearly, they come in bursts. We actually found that Thursdays were the day that most of our developers committed on. 
And when we had manual code reviews, if we didn't have reviewers ready on a Thursday, we would create our own backlog.\"\n\n### Deployments don't have to be limited to a traditional release cycle\n\n> \"How could we change the game? Instead of ditching the combustion engine, we ditched the dealership visits, and we implemented software over the air. And this huge Linux distribution that we build upwards towards 700 times per day in a continuous integration pattern, on a dev branch or a master branch, or a release branch, we can now deliver to every vehicle in the form of small, incremental deltas. We can also deliver it to the vehicle while you're driving, and not interrupt your daily life. In fact I showed Gene yesterday, we started a download and an install while I was driving, and the entire thing happened in the background. Jeff even made the comment, 'This is blue-green deployment for vehicles.'\"\n\n> \"One of my favorite indicators is deploys per day, per developer. But I was always embarrassed to share ours because it was always below one. All of our new software wouldn't actually make it to vehicles; it was always batched together. Now I'm happy to say we can deploy, and we have been in our engineering environment, 50-70 times per day of each individual piece of software to a target or to a vehicle.\"\n\n> \"No longer are deployments limited to a traditional software release cycle. We've now skirted every single process to get a technician a new piece of software, and bother somebody else's day – one of our owners – to come into a dealership and spend an hour waiting for their vehicle to be done. 
We've now empowered the customer to be their own technician.\"\n","customer-stories",[830,807,831,9,832,833],"DevOps","CI","automotive","customers",{"slug":835,"featured":6,"template":700},"chris-hill-devops-enterprise-summit-talk","content:en-us:blog:chris-hill-devops-enterprise-summit-talk.yml","Chris Hill Devops Enterprise Summit Talk","en-us/blog/chris-hill-devops-enterprise-summit-talk.yml","en-us/blog/chris-hill-devops-enterprise-summit-talk",{"_path":841,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":842,"content":848,"config":857,"_id":859,"_type":14,"title":860,"_source":16,"_file":861,"_stem":862,"_extension":19},"/en-us/blog/configuring-your-cluster-with-kubernetes-integration",{"title":843,"description":844,"ogTitle":843,"ogDescription":844,"noIndex":6,"ogImage":845,"ogUrl":846,"ogSiteName":686,"ogType":687,"canonicalUrls":846,"schema":847},"Heroes journey: Working with GitLab's Kubernetes agent","A tutorial on deploying and monitoring an application in Kubernetes without leaving GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682342/Blog/Hero%20Images/treasure.jpg","https://about.gitlab.com/blog/configuring-your-cluster-with-kubernetes-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Heroes Unmasked - How I became acquainted with the GitLab Agent for Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jean-Philippe Baconnais\"}],\n        \"datePublished\": \"2022-06-08\",\n      }",{"title":849,"description":844,"authors":850,"heroImage":845,"date":852,"body":853,"category":694,"tags":854},"GitLab Heroes Unmasked - How I became acquainted with the GitLab Agent for Kubernetes",[851],"Jean-Philippe Baconnais","2022-06-08","_A key to GitLab’s success is our vast community of advocates. 
Here at\nGitLab, we call these active contributors \"[GitLab\nHeroes](/community/heroes/).\" Each hero contributes to GitLab in numerous\nways, including elevating releases, sharing best practices, speaking at\nevents, and more. Jean-Phillippe Baconnais is an active GitLab Hero, who\nhails from France. We applaud his contributions, including leading community\nengagement events. Baconnais shares his interest in Kubernetes and explains\nhow to deploy and monitor an application in Kubernetes without leaving\nGitLab._ \n\n\nSince 2007, I’ve been a developer. I’ve learned a lot of things about\ncontinuous integration, deployment, infrastructure, and monitoring. In both\nmy professional and personal time, my favorite activity remains software\ndevelopment. After creating a new application with multiple components, I\nwanted to deploy it on Kubernetes, which has been really famous over the\nlast few years. This allows me to experiment on this platform. This\nannounces a lot of very funny things. I know some terms, I used them in\nproduction for five years. But as a user, Kubernetes Administration is not\nmy “cup of tea” 😅.\n\n\n## My first deployment in Kubernetes\n\n\nWhen I decided to deploy an application on Kubernetes, I wasn’t sure where\nto start until I saw, navigating in my project in GitLab, a menu called\n“Kubernetes.\" I wanted to know what GitLab was hiding behind this. Does this\nfeature link my project’s sources to a Kubernetes cluster? I used the credit\noffered by Google Cloud to discover and test this platform. \n\n\nDeploying my application on Kubernetes was easy. I wrote [a blog\npost](https://dev.to/jphi_baconnais/deploy-an-quarkus-application-on-gke-with-gitlabci-lgp)\nin 2019 describing how I do this, or rather, how GitLab helped me to create\nthis link so easily. In this blog post I will explain further and talk about\nwhat’s changed since then.\n\n\nBehind the “Kubernetes” menu, GitLab helps you integrate Kubernetes into\nyour project. 
You can create, from GitLab, a cluster on Google Cloud\nPlatform (GCP), and Amazon Web Services (AWS). If you already have a cluster\non this platform or anywhere else, you can connect to it. You just need to\nspecify the cluster name, Kubernetes API UR, and certificate.\n\n\n![Connect\ncluster](https://about.gitlab.com/images/blogimages/baconcreatecluster.png){:\n.shadow}\n\n\nGitLab is a DevOps platform and in the list of DevOps actions, there is the\nmonitoring part. \n\n\n![Chart of GitLab\nstages](https://about.gitlab.com/images/blogimages/baconstreamline.png){:\n.shadow}\n\n\nGitLab deploys an instance of Prometheus to get information about your\ncluster and facilitate the monitoring of your application.\n\n\nFor example, you can see how many pods are deployed and their states in your\nenvironment. You can also view some charts and information about your\ncluster, like memory and CPU available. All these metrics are available by\ndefault without changing the application of your cluster. We can also read\nthe logs directly in GitLab. For a developer, it’s great to have all this\ninformation on the same tool and this allows us to save time. \n\n\n![Pod\ndeployment](https://about.gitlab.com/images/blogimages/baconhealth.png){:\n.shadow}\n\n\n\n## A new way to integrate Kubernetes\n\n\nEverything I explained in the previous chapter doesn’t quite exist anymore.\nThe release of GitLab 14.5 was the beginning of a revolution. The Kubernetes\nintegration with certificates has limitations on security and many issues\nwere created. GitLab teams worked on a new way to rely on your cluster. And\nin Version 14.5, the [GitLab Agent for\nKubernetes](https://docs.gitlab.com/ee/user/clusters/agent/) was released! \n\n\n## GitLab Agent for Kubernetes\n\n\nGitLab Agent for Kubernetes is a new way to connect to your cluster. This\nsolution is easy to explain: An agent installed on your cluster communicates\nwith your GitLab instance with [gRPC](https://grpc.io/) protocol. 
Your agent\noffers you useful GitOps features I will explain later. The next picture\nshows you the GitLab Agent for Kubernetes architecture (from GitLab). \n\n\n![GitLab Agent for Kubernetes flow\nchart](https://about.gitlab.com/images/blogimages/baconkubernetesflowchart.png){:\n.shadow}\n\n\n### GitOps defined\n\n\nLet’s quickly define the term “[GitOps](/topics/gitops/)”: It’s a way to\nmanage your infrastructure as code, in a Git project. For me, there are two\naspects in GitOps: “pull” and “push” mode. \n\n\n- Push mode is when your Git project activates the upgrade of your\ninfrastructure following a change. \n\n- Pull mode is when your infrastructure verifies without interruption of\nyour Git project and applies changes automatically.\n\n\nAnd GitLab chose the latter mode for their solution of GitLab Agent for\nKubernetes. Indeed, your agent available on your cluster will check\nfrequently if your project changes. The gRPC protocol is great to respect\nthis intent. When you push a modification on your project, agents detect it\nautomatically, and then your cluster upgrades.\n\n\n### How the GitLab Agent for Kubernetes works\n\n\nThere are some actions to do to install and have a GitLab Agent for\nKubernetes available on your project. \n\n\nFirst, if you create a new project on GitLab, you can use the template\n“Management cluster,” which allows the initialization of files. These files\nallow you to have examples of: \n\n- a declaration of an agent\n\n- a list of starter kits to install DevOps tools\n\n\nGitLab is a DevOps platform that wants to help you to configure all steps of\nthe lifecycle of your project. You can find the configuration of tools like\nPrometheus, Sentry, Ingress, etc. 
I will detail this later.\n\n\n### The evolution of GitLab Agent for Kubernetes\n\n\nBefore explaining more details about this agent, you have to know one thing.\nThis product is in constant evolution and your feedback is welcome in [this\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/342696#note_899701396)\nto improve it. The roadmap is available and each version gives some\ninformation about its evolution.\n\n\n## How to use GitLab Agent for Kubernetes\n\n\nCreating an agent is simple. You have to create a file in the directory\n.gitlab/agents/\u003Cnameofyouragent>/config.yaml. \n\n\n\n![Connect\ncluster](https://about.gitlab.com/images/blogimages/baconstructure.png)\n\n\n\nThe default configuration should contain:\n\n- your project id, represented by your \u003Cuser or group>/project\n\n- a namespace by default to deploy applications if it’s not present in your\nyaml files\n\n- path of your yaml file to apply. This can be a specific file, a directory,\nor a pattern of files\n\n- level of debug\n\n\n```\n\n\ngitops:\n manifest_projects:\n - id: xxxxx/demo-gitlab-kubernetes-cluster-management\n   default_namespace: gitlab-kubernetes-agent-demo\n   paths:\n   - glob: 'deploy.yaml'\nobservability:\n logging:\n   level: debug\n\n```\n\n\nYou can add security to this configuration file with the “ci_access”\nproperty. For example, it allows developers to avoid destroying the\nKubernetes infrastructure 😅. I didn’t explore in detail this part yet. \n\n\nAll configuration options are available on [this reference\npage](https://docs.gitlab.com/ee/user/clusters/agent/gitops.html#gitops-configuration-reference). \n\n\nAfter creating and pushing your file in your project, you have to register\nyour agent. And this action takes two seconds on the GitLab UI. \n\n\n![Add an\nagent](https://about.gitlab.com/images/blogimages/baconaddanagent.png){:\n.shadow}\n\n\nOn the next step, GitLab gives you the Docker command to install your agent\non your cluster. 
For example:\n\n\n```\n\n\ndocker run --pull=always --rm \\\n    registry.gitlab.com/gitlab-org/cluster-integration/gitlab-agent/cli:stable generate \\\n    --agent-token=\u003Cyour token generated by GitLab> \\\n    --kas-address=wss://kas.gitlab.com \\\n    --agent-version stable \\\n    --namespace gitlab-kubernetes-agent | kubectl apply -f -\n\n```\n\nYou can copy-paste this command on your cluster and your agent will be\navailable in a Kubernetes namespace. You can see on the GitLab UI that the\nlink with the agent is successful.\n\n\n![Link with agent\nsuccess](https://about.gitlab.com/images/blogimages/baconagentk.png){:\n.shadow}\n\n\n\nYou can also verify this connection in logs of agent container: \n\n\n```\n\n\n{\"level\":\"debug\",\"time\":\"2022-xx-xxT14:11:57.517Z\",\"msg\":\"Handled a\nconnection successfully\",\"mod_name\":\"reverse_tunnel\"}  \n\n\n```\n\n\n### GitLab cluster management \n\n\nGitLab is a DevOps platform and uses tiers of applications to manage all the\nsteps of a modern DevOps pipeline. The “Monitor” part in GitLab is based on\nsome tools such as\n[Prometheus](https://prometheus.io/docs/visualization/grafana/),[Sentry](https://sentry.io/),\n[Vault](https://www.vaultproject.io/), etc. To help you, GitLab created the\ntemplate [GitLab Cluster Management](\nhttps://gitlab.com/gitlab-org/project-templates/cluster-management), which\ngives you a basic configuration of these tools.\n\n\nTo install these tools, a `.gitlab-ci.yml` file is created and defines a job\nto deploy them with helmfile configuration. All these tools, contained in\nthe directory named “applications,” can be overridden or customized in\n`values.yaml` file. \n \nAnd for my experimentation, I used this template and applied a small change\nto have an external IP address for the Prometheus instance. After\nregistering this external IP in GitLab (Menu Settings > Monitor > Alerts),\nthe Monitor menu has data. We can check information about any pods deployed\non my cluster. 
\n\n\n![Agent\ngraph](https://about.gitlab.com/images/blogimages/baconagentgraph.png){:\n.shadow}\n\n\n## The GitOps aspect \n\n\nThe GitOps aspect can be verified quickly. If you choose to specify one\nmanifest file defining an application deployment, a modification on this\nfile implies an automatic deployment on your cluster. Without CI! This\nallows us to have a faster deployment than if we passed with a pipeline. The\nnew features or fixes will be deployed faster on your infrastructures. And\nif you use the free version of GitLab, your deployment will not count in\nyour CI quota. \n\n\nAfter a commit, the agent detects it and we can see the commit id in the\nagent logs.\n\n\n```\n\n{\"level\":\"info\",\"time\":\"2022-04-11T15:22:44.049Z\",\"msg\":\"Synchronizing\nobjects\",\"mod_name\":\"gitops\",\"project_id\":\"jeanphi-baconnais/demo-gitlab-kubernetes-cluster-management\",\"agent_id\":12804,\"commit_id\":\"e2a82fe6cc82fa25e8d5a72584774f4751407558\"}\n\n\n```\n\n\n## CI/CD tunnel\n\n\nAnother feature that comes with the GitLab Agent for Kubernetes is the CI/CD\ntunnel. Your agent facilitates the interaction with your cluster. You just\nhave to define a KUBE_CONTEXT variable referencing the path of your agent. \n\n\n```\n\nvariables:\n\nKUBE_CONTEXT: \"xxxxx/demo-gitlab-kubernetes-cluster-management:agentk\"\n\n\n```\n\n\nAnd actions on your cluster are available without secret configuration or\nanything else. 
If you want to execute `kubectl` commands, you can easily use\nthis job:\n\n\n```\n\n\ntest-cicd-tunnel:\n stage: test\n extends: [.kube-context]\n image:\n   name: bitnami/kubectl:latest\n   entrypoint: [\"\"]\n script:\n  - kubectl get ns\n when: manual\n\n```\n\n\n## What's next\n\n\nCurrently, GitLab Agent for Kubernetes doesn’t allow you to get information\nabout the state of pods on your cluster’s environment page.\n\n\n![Success](https://about.gitlab.com/images/blogimages/baconci.png){:\n.shadow}\n\n\nBut GitLab wants to offer the same level of service as the certificate\nintegration. So, check the roadmap ([in this\nissue](https://gitlab.com/groups/gitlab-org/-/epics/3329)) and the contents\nof each release. The template Cluster Management is in progress, too. Some\nissues will give new features for configuration tools.\n\n\nThis experience was so rewarding for me. I would deploy a project on Google\nCloud, and I discovered a new method. I saw this agent described in [GitLab\n14.5](/releases/2021/11/22/gitlab-14-5-released/) but I didn’t imagine the\nimpact it can have on a project. \n\n\nMy colleague [Eric Briand](https://twitter.com/eric_briand) and I had the\nopportunity to speak about this subject at [Malt Academy\nsessions](https://www.malt-academy.com/) and [Meetup GitLab\nFrance](https://www.meetup.com/GitLab-Meetup-France/events/283917115). I\nwill continue to experiment with this agent and try different options for\nthis wonderful product! \n\n\n**This blog post and linked pages contain information related to upcoming\nproducts, features, and functionality. It is important to note that the\ninformation presented is for informational purposes only. Please do not rely\non this information for purchasing or planning purposes. As with all\nprojects, the items mentioned in this video/blog post and linked pages are\nsubject to change or delay. 
The development, release, and timing of any\nproducts, features, or functionality remain at the sole discretion of GitLab\nInc.**\n\n\nCover image by [Ashin K Suresh](https://unsplash.com/photos/mkxTOAxqTTo) on\nUnsplash.\n\n{: .note}\n",[784,269,9,855,856],"growth","contributors",{"slug":858,"featured":6,"template":700},"configuring-your-cluster-with-kubernetes-integration","content:en-us:blog:configuring-your-cluster-with-kubernetes-integration.yml","Configuring Your Cluster With Kubernetes Integration","en-us/blog/configuring-your-cluster-with-kubernetes-integration.yml","en-us/blog/configuring-your-cluster-with-kubernetes-integration",{"_path":864,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":865,"content":871,"config":877,"_id":879,"_type":14,"title":880,"_source":16,"_file":881,"_stem":882,"_extension":19},"/en-us/blog/continuous-integration-ticketmaster",{"title":866,"description":867,"ogTitle":866,"ogDescription":867,"noIndex":6,"ogImage":868,"ogUrl":869,"ogSiteName":686,"ogType":687,"canonicalUrls":869,"schema":870},"How GitLab CI supported Ticketmaster's ramp up to weekly mobile releases","Ticketmaster Android developer Jeff Kelsey shares why GitLab CI was a game changer for his team.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682946/Blog/Hero%20Images/tm-cover-image-small.jpg","https://about.gitlab.com/blog/continuous-integration-ticketmaster","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab CI supported Ticketmaster's ramp up to weekly mobile releases\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jeff Kelsey\"}],\n        \"datePublished\": \"2017-06-07\",\n      }",{"title":866,"description":867,"authors":872,"heroImage":868,"date":874,"body":875,"category":741,"tags":876},[873],"Jeff Kelsey","2017-06-07","\nIt's always been a goal for the Ticketmaster mobile team to get to weekly releases. 
In the first half of this year we were able to accomplish it, delivering new versions\nof both the Android and iOS app on a weekly basis since February. We've seen the positive impact on our fans, and it was even easier than we thought –\nmaking our entire application development process that much better.\n\nBut it didn't start out this way...\n\n\u003C!-- more -->\n\n![review-2](https://about.gitlab.com/images/blogimages/ticketmaster-assets/Review2.png \"Most user-friendly ticketing app\")*\u003Csmall>A faster, more consistent release cycle leads to a better fan experience for users of the Ticketmaster Apps.\u003C/small>\n\nThere comes a time in every engineer’s career when a part of your tech stack no longer passes the “smell test.\" Usually, there is some sort of dramatic event where something that was generally accepted as “isn’t the best, but it works” changes to “this is now a problem.” For me and the Ticketmaster mobile team, this event happened with our Jenkins-based CI pipeline in February.\n\nWe were about to release the newest version of our Android app, but there was a mistake in the build. We had forgotten to increment the Android versionCode, meaning we would need to update and create a new binary file to upload to the store. It was the end of the day, a sunny afternoon quickly fading to darkness in Hollywood. By now it was 6pm PST, and everyone was eager to leave.\n\n\"No problem,\" I thought. I can build the release locally in under three minutes, provide the file to the QA team, and we can all get on our way.\n\n“Won’t help us,” responded my high-standard and exceptional QA team.\n\n“All releases need to come from CI for consistency.” They were right. Local builds would not be safe for production. 
What if something about my machine’s configuration introduced an issue?\n\n“Ok, so how long does it take for the release build to get created through our Jenkins CI pipeline?” I asked, figuring the time couldn’t be worse than 30 minutes.\n\n“It takes two hours,” came the response. Sigh… Going to be a late night.\n\n![sysiphus](https://about.gitlab.com/images/blogimages/ticketmaster-assets/sysiphus.gif \"Sysiphus\")\n\n*\u003Csmall>Our old CI pipeline\u003C/small>*\n\n## GitLab CI to save the day (in a day!)\n\nTwo… hours…  For a minor change. Now I can’t lay all the blame on Jenkins. Some of this may have been our own fault, generating too many build flavors, forcing clean rebuilds in between steps and running extra tests for deprecated features. But, it was clear we needed to change and get better at CI. Jenkins was always a bit clunky for the last few years. Weighed down by plugins and years of legacy development, it was also difficult for us to update the Jenkins machines with new SDKs, and we had to rely on other teams to assist us. We clearly needed a fresh start.\n\nWe had been using GitLab at Ticketmaster for several years for code review and visually browsing our git history, so it made sense that trying to utilize [GitLab’s new CI tools](/solutions/continuous-integration/) would be worth a shot. I started with a helpful Android [blog post for setting up GitLab CI from Greyson Parrelli](http://www.greysonparrelli.com/post/setting-up-android-builds-in-gitlab-ci-using-shared-runners/).\n\nBut I soon ran into a problem. At Ticketmaster we use Amazon ECR for our [Docker](https://aws.amazon.com/docker/) container registry rather than GitLab repos, like in the tutorial. With the help of Kraig Amador, Tim Nichols, and others at Ticketmaster, I learned how to push my Docker container image to Amazon ECR and pull it down for each Android build in GitLab CI. 
The final results were a marked improvement:\n\n![gitlab-ci](https://about.gitlab.com/images/blogimages/ticketmaster-assets/Gitlab8min.png 'GitLab CI in 8 minutes')*\u003Csmall>Our GitLab CI build and test takes under 8 minutes to build, test, and publish artifacts.\u003C/small>*\n\nLess than eight minutes total from commit to build, test and generate artifacts. We can use Gradle and the SonarQube plugin to help us calculate code quality with every commit to our codebase, giving us more valuable information in addition to passing failing tests to evaluate all of our merge requests. This gives our team numbers to measure and make goals against.\n\nAnd we could see everything in one place, in GitLab. The iOS team had a more complicated pipeline, but they quickly followed with their own, running their tests on local runners. Since February we have had weekly releases of our mobile apps, and GitLab CI has been a huge part of our success over the past few releases.\n\n## From GitLab artifact to weekly releases\n\n![weekly-release](https://about.gitlab.com/images/blogimages/ticketmaster-assets/WeeklyReleases.png \"Weekly Releases\")*\u003Csmall>GitLab CI has helped us get to weekly releases with more consistent adoption of new releases.\u003C/small>*\n\nWith the benefit of faster cycle time, and faster releases, we have seen other benefits. Since each release has a smaller change set, our crash-free rates and store ratings have improved. We have less time waiting for build and spend more time improving the quality of our products. Our fans are getting features into their hands more quickly and benefit from a higher-quality and a consistently improving product. 
The CI analytics available on GitLab are an additional scoreboard for our team to optimize and improve into the future.\n\nNow, whenever we integrate new SDKs into our mobile apps, we are helping other teams get their SDK’s set up in GitLab CI to push integrated builds to our suite of integration and functional tests as a part of our process. We are [getting to innovation faster](https://tech.ticketmaster.com/2016/11/08/getting-to-innovation-faster/).\n\nThings were looking pretty scrappy for our CI pipeline only a few months ago. Now it is a whole different ballgame. If your team is looking for a way to breathe fresh life into a legacy CI pipeline, I suggest taking a look at GitLab CI. It has been a real game changer for our mobile team at Ticketmaster.\n\n![review-1](https://about.gitlab.com/images/blogimages/ticketmaster-assets/Review1.png \"Ticketmaster Mobile Review 1\")\n![review-3](https://about.gitlab.com/images/blogimages/ticketmaster-assets/Review3.png \"Ticketmaster Mobile Review 2\")\n\n### About the Author\n\nJeff Kelsey is the Lead Engineer for Ticketmaster's Android development team. 
Find him on twitter [@jeffkelsey](https://twitter.com/jeffkelsey).\n",[833,109,9],{"slug":878,"featured":6,"template":700},"continuous-integration-ticketmaster","content:en-us:blog:continuous-integration-ticketmaster.yml","Continuous Integration Ticketmaster","en-us/blog/continuous-integration-ticketmaster.yml","en-us/blog/continuous-integration-ticketmaster",{"_path":884,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":885,"content":891,"config":898,"_id":900,"_type":14,"title":901,"_source":16,"_file":902,"_stem":903,"_extension":19},"/en-us/blog/creating-a-transparent-digital-democracy",{"title":886,"description":887,"ogTitle":886,"ogDescription":887,"noIndex":6,"ogImage":888,"ogUrl":889,"ogSiteName":686,"ogType":687,"canonicalUrls":889,"schema":890},"Government agency builds transparent democracy using GitLab","The Cook County Assessor’s office explains how they're using GitLab to help create a new level of government transparency.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679611/Blog/Hero%20Images/cook-county-blog-unsplash.jpg","https://about.gitlab.com/blog/creating-a-transparent-digital-democracy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How one government agency is creating a transparent digital democracy with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein Matturro\"}],\n        \"datePublished\": \"2019-09-02\",\n      }",{"title":892,"description":887,"authors":893,"heroImage":888,"date":895,"body":896,"category":694,"tags":897},"How one government agency is creating a transparent digital democracy with GitLab",[894],"Brein Matturro","2019-09-02","\n\nAt GitLab Connect Chicago, Robert Ross, chief data officer at the Cook County Assessor’s Office,\npresented the talk, “An experiment in digital democracy: How the Cook County Assessor’s\nOffice is using GitLab to reach a new level of transparency.”\n\nThe Chicago 
Assessor’s Office is responsible for predicting the value of over a million pieces of\nreal estate and reassessing them every three years. Record keeping has always been on paper and\nonly recently has “marginally sophisticated computer programming” been used. Now the Assessor's Office\nwants to turn the process over to software algorithms.\n\n“In a world where the computer is doing the heavy lifting, policy is code and code is policy,”\nRobert says. The algorithms used in assessing a property are dependent on a number of variables. If the\ncode variables are central to the assessment office, as it is for Cook County, it becomes\nimperative that it is made public. “[Our office] ran on a platform of fair, ethical, and\ntransparent assessments. In order to achieve that third pillar, we absolutely have to publish\nthe code that we use to value (a) house,” Robert says.\n\n## Modernizing software and viewpoints\n\nThe Assessment Office had a limited number of days to completely replicate the existing data\nformats that were in place from the previously elected office and to create a transparent\nplatform where property owners could understand how their assessment came to be. There were other\nchallenges too, such as legacy scripts, the inability to integrate older software, and zero\nassistance from the previous office.\n\nRobert and his team turned to GitLab to publish all of their code on residential modeling.\nThey have four repositories with more than 880 commits, all of which the public is able to access.\n“We’re using GitLab completely differently. 
Our product is your tax assessment and we have to\ndeploy the product on time because if we don’t, the entire government falls apart,” Robert says.\n“We will make mistakes and we have to document those mistakes so that we can be transparent and\ndo our jobs as well as we can.”\n\n## Creating radical policy shifts with transparency\n\nThe ability for property owners to access and own the information that creates their estate value\nhas never been done before at this level. “No county assessor has ever used a public-facing\nrepository for their work,” Robert says. In fact, establishing governing policies has customarily\nbeen done behind closed doors. Cook County has taken an experimental step towards open source\ngovernment policies. “Very few government agencies do it,” he says.\n\nThe Cook County office doesn’t want to stop there. This is just the first step in what it hopes\nare future electoral victories. “We need to demonstrate that transparency is ‘good politics’…\nif transparency becomes a successful evolutionary trait among politicians, you get more of it.”\n\nWant to hear about how Robert and the Cook County Assessment Office use GitLab? 
Watch his\npresentation in its entirety here:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/K8ROmhwphMg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by [Element5 Digital](https://unsplash.com/@element5digital) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,269,697],{"slug":899,"featured":6,"template":700},"creating-a-transparent-digital-democracy","content:en-us:blog:creating-a-transparent-digital-democracy.yml","Creating A Transparent Digital Democracy","en-us/blog/creating-a-transparent-digital-democracy.yml","en-us/blog/creating-a-transparent-digital-democracy",{"_path":905,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":906,"content":912,"config":919,"_id":921,"_type":14,"title":922,"_source":16,"_file":923,"_stem":924,"_extension":19},"/en-us/blog/customer-interview-charter-communications",{"title":907,"description":908,"ogTitle":907,"ogDescription":908,"noIndex":6,"ogImage":909,"ogUrl":910,"ogSiteName":686,"ogType":687,"canonicalUrls":910,"schema":911},"Better Developer & Customer Experiences with One Application","Director of Product Integration Michael Sobota of Charter Communications shares how they're using GitLab to simplify their toolchain, with big results.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663655/Blog/Hero%20Images/gitlab-live-sept-2018.png","https://about.gitlab.com/blog/customer-interview-charter-communications","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Customer story: Driving better developer and customer experiences with a single application\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-09-26\",\n      
}",{"title":913,"description":908,"authors":914,"heroImage":909,"date":915,"body":916,"category":301,"tags":917},"Customer story: Driving better developer and customer experiences with a single application",[825],"2018-09-26","\nDuring [#GitLabLive](/blog/gitlab-live-event-recap/), customer Michael Sobota,\nDirector of Product Integration at [Charter Communications](https://www.spectrum.com/about.html), joined us to share how adopting\nGitLab as the [single application](/handbook/product/single-application/) for their entire software development lifecycle has brought their\nfeedback loop of two weeks down to a matter of minutes. Charter is an American telecom\ncompany providing services to over 26 million customers in 41 states, and is the second-largest\ncable operator in the US. They have 94,000 employees worldwide.\n\nYou can watch the interview with Michael and check out our key takeaways from it below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/HnTPi7y5MVo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Key takeaways\n\n### A single place for all development, operations, and feedback is critical to a great developer experience\n\n Michael: \"It's my job to make sure developers who are providing a digital experience to our\n subscribers have a great developer experience: Helping them realize that vision of quick iterations,\n giving them feedback, shifting left concerns like security and testing, deployments, and getting\n that feedback early in our value stream where it’s cheaper to course correct.\"\n\n\"GitLab has been a cornerstone of our [DevOps platform](/solutions/devops-platform/): using it for source control management,\nfor continuous integration, continuous deployment, a Docker registry, artifacts. 
We want to give\ndevelopers a single place to get feedback, self-service, and do it in a responsible manner that\nallows us to provide great value to our subscribers.\"\n\n### Quick feedback is also essential to staying competitive\n\nMichael: \"Consumers and subscribers are looking for different, more digital ways to interact\nwith companies and to consume content. Shifting left allows us to be competitive in creating\nthese new, digital ways for consumers to interact with us, whether it’s paying their bill or understanding\nhow their account is set up, ordering a new service, consuming live streaming video, or video on demand.\nCustomers want that quick feedback and do to that we need to shift things left.\"\n\n### Having everything in one place can drastically reduce your feedback loop\n\nMichael: To be able to understand, \"Did my code merge in? Did it build the capacity tests? Did it pass\nthe security standards?\" – these things, in a single place, within the merge request, within that\nUI, have helped us cut down our feedback loop that was typically around our sprint cycle of\naround two weeks, down to minutes.\"\n\n\"Gone are the days of managing different build machines. It’s all in the power of the developers,\nand now from the first line of code on every single branch, we can deploy a mutually exclusive\nenvironment and get feedback in minutes down from that two-week cycle. 
Now, almost every\nsingle branch of code can have a deployment, and you can have feedback as a developer, as a\nproduct owner, or as a designer, right away.\"\n",[830,9,918],"workflow",{"slug":920,"featured":6,"template":700},"customer-interview-charter-communications","content:en-us:blog:customer-interview-charter-communications.yml","Customer Interview Charter Communications","en-us/blog/customer-interview-charter-communications.yml","en-us/blog/customer-interview-charter-communications",{"_path":926,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":927,"content":933,"config":940,"_id":942,"_type":14,"title":943,"_source":16,"_file":944,"_stem":945,"_extension":19},"/en-us/blog/delta-cloud-native",{"title":928,"description":929,"ogTitle":928,"ogDescription":929,"noIndex":6,"ogImage":930,"ogUrl":931,"ogSiteName":686,"ogType":687,"canonicalUrls":931,"schema":932},"How Delta made the journey to cloud native","Delta tossed aside the rule book to go cloud native and achieve workflow portability. Here's how it's done.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678376/Blog/Hero%20Images/deltacommit.jpg","https://about.gitlab.com/blog/delta-cloud-native","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Delta made the journey to cloud native\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-10-17\",\n      }",{"title":928,"description":929,"authors":934,"heroImage":930,"date":936,"body":937,"category":694,"tags":938},[935],"Valerie Silverthorne","2019-10-17","\n_Delta Air Lines is the top domestic carrier in the United States, flying over 200 million people a year to more than 300 destinations in 50 countries. Delta is in a highly competitive industry with a lot of moving parts and that’s why, in 2016, the company began a sweeping digital transformation journey. 
At [GitLab Commit in Brooklyn](/blog/wrapping-up-commit/), Jasmine James, IT manager, DevOps Center of Excellence at Delta, shared how the company journeyed to [cloud native](/topics/cloud-native/) while avoiding vendor lock-in._\n\nDelta’s primary goal was business agility, Jasmine says, and the plan was to get there using cloud native. “We'll do cloud native and then we'll get the business agility, we thought,” she says. “But at Delta, because we have such large, complex systems and a very mission-critical environment, it was not that easy at all.”\n\nTo start, Delta took a hard look at its existing environment and at ways it could be improved. Metrics-based process mapping made it clear the infrastructure was standing in the way of delivering value. A flexible architecture would also make it easier to have scalable and reliable workloads, she explains. The company’s existing tools wouldn’t work with cloud native, so Jasmine’s team set out to find tools that could provide version control, [continuous integration, and continuous delivery](/solutions/continuous-integration/) – the three areas the team considered the [MVP](https://www.techopedia.com/definition/27809/minimum-viable-product-mvp) to get the job done.\n\n## Stick with vowels\n\nThe team came up with an easy-to-remember acronym to describe the criteria used during the tool search: **AEIOU**. **A** is for applicability: Will the tool be applicable for the heavy Java and Linux users at Delta? **E** meant enterprise-ready because Delta needed tried and true maturity. **I** stands for integration, and Jasmine was quick to point out that in this case, it wasn’t about legacy integration but simply a matter of ensuring all the new tools worked well together. **O** is for overhead, which has particular meaning for Jasmine’s team since they manage all the development tools at Delta. “We had to ask ourselves how easy it would be to manage and administer tools for 5000 developers at Delta,” she says. 
And finally, **U** represents usefulness, which is another way of saying the team wanted to ensure it would choose the right building blocks that would work together.\n\nDelta’s first choice of tools was GitLab, followed by [Sonatype Nexus](https://www.sonatype.com/product-nexus-repository) and Jenkins for CI, Jasmine says. Today Delta is considering expanding its options for developers to also include [GitLab CI](/solutions/continuous-integration/).\n\n## Careful choices = concrete benefits\n\nThe careful thought process has already shown a number of concrete benefits, Jasmine says. Delta created an API to allow customers flying different legs using partner airlines to check in just one time. And the airline’s employees have enhanced decision support around weather events that help to minimize the impact of canceled flights.\n\nBut the benefits go further, Jasmine stresses. “We now have the ability to play the field,” she says. “We not only can leverage the best of breed features in the public cloud space, we also can pick and choose based on public cloud provider performance and cost. With the cost savings we have been able to do a lot (which means we can) fund more great features.”\n\nDelta’s also been able to offer what Jasmine calls a “first class developer experience” because programmers can leverage both the airline’s on premises [Open Shift](https://www.openshift.com) private cloud and scale to the public cloud as needed, all while using familiar programming languages and tools.\n\nJasmine’s take away: “Be you, be different, be great in cloud native. 
What that means is that although I’ve talked a lot about Delta’s journey, there is no one way to implement cloud native.”\n\nWatch all of Jasmine’s presentation:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/zV_hFcxoN8I\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nCover image by [Angela Compagnone](https://unsplash.com/@angelacompagnone) on [Unsplash](https://unsplash.com/).\n{: .note}\n",[939,784,9,939],"cloud native",{"slug":941,"featured":6,"template":700},"delta-cloud-native","content:en-us:blog:delta-cloud-native.yml","Delta Cloud Native","en-us/blog/delta-cloud-native.yml","en-us/blog/delta-cloud-native",{"_path":947,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":948,"content":954,"config":959,"_id":961,"_type":14,"title":962,"_source":16,"_file":963,"_stem":964,"_extension":19},"/en-us/blog/devops-stakeholder-buyin",{"title":949,"description":950,"ogTitle":949,"ogDescription":950,"noIndex":6,"ogImage":951,"ogUrl":952,"ogSiteName":686,"ogType":687,"canonicalUrls":952,"schema":953},"Need DevOps buy-in? Here's how to convince stakeholders","If you need to make the case for DevOps to a non-technical crowd, it's important to be prepared. Here's what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681597/Blog/Hero%20Images/speedphoto.jpg","https://about.gitlab.com/blog/devops-stakeholder-buyin","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Need DevOps buy-in? 
Here's how to convince stakeholders\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-09-24\",\n      }",{"title":949,"description":950,"authors":955,"heroImage":951,"date":956,"body":957,"category":804,"tags":958},[780],"2020-09-24","\n\nWe know that DevOps is key to staying nimble in an increasingly competitive marketplace, but chances are your colleagues in finance or marketing aren’t as well-informed about software development.\n\nOne of the major challenges technology teams embedded in non-tech organizations face is convincing key business stakeholders to invest in cutting-edge methodologies such as [DevOps](/topics/devops/). Oftentimes, this challenge comes down to ineffective communication and misaligned incentives.\n\n\"Unfortunately, the divide between these incentives and the misalignment in these incentives is not exclusively held between developers and operators, the similar divide exists between the business and IT, in fact, in the business, they may not even be able to tell the difference between developers and operators, it's all IT to them,\" said [Nathen Harvey](https://twitter.com/nathenharvey), Developer Advocate from Google, at GitLab Virtual Commit. \"Much like from my perspective, it's just the business: finance, marketing, accounting, they all go together and blur in my head.\"\n\nThe best way to get stakeholders to buy-in to DevOps? Align incentives, think big picture, lead with empathy, and come prepared with evidence about the business value of DevOps.\n\n## Align incentives on your technology team\n\nBefore approaching the key decision-makers about investing in DevOps, make sure there is consensus among dev and ops about what direction you’re moving in. 
The tension between dev and ops teams is well documented: Developers tend to want greater agility, while operators want more stability.\n\n\"We turn to our developers and we say, 'Your job is to build and ship features as fast as possible, your job is agility,'\" said Nathen. \"And then we turn to our operators and we say, 'Your job (is to) make sure that the platform is stable, that nothing ever breaks.'\"\n\nThe good news is, DevOps is a way to have the best of both worlds.\n\nBefore he joined Google, Nathen worked for a retail company where his responsibility was to push the \"deploy\" button to ship new software updates every two weeks. There was a lot of ceremony around deployments, but there was also an office pool about how many of those changes would be rolled back.\n\nResearch by Google Cloud’s DevOps Research and Assessment (DORA) shows that teams that ship smaller features move faster while maintaining a more stable production environment, with numbers to prove it. When comparing the elite performers with the low performers, elite DevOps performers manage to balance speed and stability:\n\n*   Deploy code 208 times more frequently\n*   106 times faster from commit to deploy\n*   Changes are likely to fail just 1/7 of the time\n*   2604 times faster recovery time from incidents\n\nOnce you have developers and operators clamoring for DevOps, it’s time to move on to the next stakeholder tier.\n\n## Think about the business you work for\n\nGitLab is a software company, so we’re always thinking about new ways to deploy faster and more nimble code. If our developers found a new way to achieve this, we’re all ears. Most of our customers don't work for tech companies, but the most successful ones have found a way to make technology relevant to their business’ mission.\n\nFor example, [Delta Airlines found a way to go cloud native](/blog/delta-cloud-native/) because it fit into their mission of business agility. 
Whether you’re in transportation or e-commerce, business agility is something we can all agree on. Make a list of the top three priorities for your company and think about what your customers want (e.g., in the pandemic it may be an app with reliable curbside pick-up). Think about your company’s mission and business strategy and sketch out a compelling case for why DevOps will help your business edge out the competition.\n\n## Lead with empathy and think strategically\n\nBefore approaching your collaborators on the business side of things, put yourself in their shoes. Think in-depth about their motivations and goals to find the most compelling way to communicate with them.\n\nFirst, write your problem statement (e.g., \"I want to adopt a more agile DevOps strategy\"). Next, identify three key stakeholders across different teams on the business side of things (e.g., Max in Marketing, Alex in Accounting, and Lee in Legal). After that, conduct an informal thought exercise to enable more empathetic and strategic thinking:\n\n*   Look at their job description. What are their core responsibilities?\n*   Think about resourcing. What are their resource constraints?\n*   What is their level of influence over the decision? Grade their influence on a scale of one to five (one being low influence, five being high influence)\n*   How does helping your tech team be more agile impact their team’s performance and goals?\n\nIn the end, communicating with stakeholders about DevOps is all about finding common ground.\n\n## Close with evidence\n\nLet’s face it, the business side of your organization might not know the difference between a developer and an ops pro any more than you understand the intricacies of accounting, and that’s OK. So long as things aren’t broken, the gatekeepers are probably disinclined to fix it. 
But what if you can demonstrate just how much better things could be with a more [agile software delivery strategy](/solutions/agile-delivery/)?\n\nThe DORA team at Google created a [rigorous State of DevOps research program](https://www.devops-research.com/research.html) that assesses how different industries can improve software delivery. A simple five-question survey on five DevOps capabilities will rank your team into four tiers of performance – between low performer and elite performer.\n\nEvaluating your progress is key. Nathen's deployments at a previous employer had good \"time to restore\" rates but the fail change rate was between 16-30%, a metric that leaves a lot of room for improvement.\n\n\"We felt like we were doing really well, and in fact, we were, we had made a ton of great progress, but there were still lots of opportunities for us to improve,\" said Nathen. \"So using this quick check can help you and your team identify where are some opportunities for you to improve? How do you stand up against the others within your industry?\"\n\nIn the end, Nathen’s team ranked as a medium performer. So how does your team line-up? By coming prepared to the meeting with evidence on concrete ways a DevOps methodology can lead to more business agility, you are more likely to get the endorsement of key stakeholders on your plan.\n\n## Learn more about measuring software delivery\n\nLearn more about measuring DevOps by watching the keynote featuring Nathen and Dina Graves Portman from GitLab Virtual Commit. 
[Watch the other keynotes](https://www.youtube.com/playlist?list=PLFGfElNsQthYQaTiUPQcu4O0O20WHZksz), including a [presentation](https://youtu.be/xn_WP4K9dl8) by [GitLab CEO Sid Sijbrandij](/company/team/#sytses).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/yUyZExE-5TU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by [CHUTTERSNAP](https://unsplash.com/@chuttersnap?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/speed?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[830,9,807],{"slug":960,"featured":6,"template":700},"devops-stakeholder-buyin","content:en-us:blog:devops-stakeholder-buyin.yml","Devops Stakeholder Buyin","en-us/blog/devops-stakeholder-buyin.yml","en-us/blog/devops-stakeholder-buyin",{"_path":966,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":967,"content":973,"config":979,"_id":981,"_type":14,"title":982,"_source":16,"_file":983,"_stem":984,"_extension":19},"/en-us/blog/dockerizing-review-apps",{"title":968,"description":969,"ogTitle":968,"ogDescription":969,"noIndex":6,"ogImage":970,"ogUrl":971,"ogSiteName":686,"ogType":687,"canonicalUrls":971,"schema":972},"Dockerizing GitLab Review Apps","A GitLab user shows us how to deploy Docker containers as a Review App.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680430/Blog/Hero%20Images/dockerizing-review-apps.jpg","https://about.gitlab.com/blog/dockerizing-review-apps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Dockerizing GitLab Review Apps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stephan Hochdörfer\"}],\n        \"datePublished\": \"2017-07-11\",\n      
}",{"title":968,"description":969,"authors":974,"heroImage":970,"date":976,"body":977,"category":741,"tags":978},[975],"Stephan Hochdörfer","2017-07-11","Last year GitLab introduced the [Review\nApps](https://docs.gitlab.com/ee/ci/review_apps/) feature. Review Apps are app\nenvironments that are created dynamically every time you push a new branch\nup to GitLab. As a bonus point the app environments are automatically\ndeleted when the branch is deleted. Since we moved to using Docker for quite\na few of our projects I was keen on figuring out how to combine Docker and\nthe GitLab Review Apps functionality as the documentation only mentions\nNGINX as a way to run Review Apps. As it turns out, it is rather simple to\ndeploy Docker containers as a Review App.\n\n\n\u003C!-- more -->\n\n\nIn our scenario the GitLab Runner for building the Docker image and the\nGitLab Runner for \"running\" the Review Apps make use of the shell executor,\nthat way we do not have to deal with Docker-in-Docker issues. Besides\ninstalling the gitlab-ci-multi-runner package we also installed Docker and\ndocker-compose.\n\n\nFirst of all, we define two build stages in the .gitlab-ci.yml file – the\nbuild and deploy stage:\n\n\n```html\n\nstages:\n  - build\n  - deploy\n  ```\n\nThe build stage is defined like this:\n\n```html\n\nbuild:\n  tags:\n    - php7\n  stage: build\n  script:\n    - echo \"Building the app\"\n    - composer.phar install\n    - docker build -t myproject/myapp .\n    - docker tag myproject/myapp:latest \\\n      registry.loc/myproject/myapp:$CI_COMMIT_REF_NAME\n    - docker push registry.loc/myproject/myapp:$CI_COMMIT_REF_NAME\n  only:\n  - master\n  ```\n\nThis will create the Docker image and push it to our Sonatype Nexus instance\nwhich serves as a private Docker registry for us. As you can see I make use\nof the $CI_COMMIT_REF_NAME variable when tagging the Docker image. That way,\nwe end up with a Docker image per branch. 
Downside: you cannot use\ncharacters in the branch name which are no valid Docker version identifiers.\nI still need to figure out a fix for this.\n\n\nThe deploy stage consists of two jobs: one for deploying the container, the\nother for undeploying the container:\n\n\n```html\n\ndeploy_dev:\n  tags:\n    - dev\n  stage: deploy\n  variables:\n    GIT_STRATEGY: none\n  script:\n    - echo \"Deploy to dev.loc\"\n    - docker pull registry.loc/myproject/myapp:$CI_COMMIT_REF_NAME\n    - docker stop reviewapp-demo-$CI_COMMIT_REF_NAME || true\n    - docker rm reviewapp-demo-$CI_COMMIT_REF_NAME || true\n    - docker run -d -P -l traefik.enable=true \\\n      -l traefik.frontend.rule=Host:reviewapp.dev.loc \\\n      -l traefik.protocol=http --name reviewapp-demo-$CI_COMMIT_REF_NAME \\\n      registry.loc/myproject/myapp:$CI_COMMIT_REF_NAME\n  environment:\n    name: dev\n    url: http://reviewapp.dev.loc\n  only:\n  - master\n  ```\n\nWhen this code is run it will simply pull the latest image from the private\nDocker registry and run it. Since the gitlab-runner user will push the image\nto the registry the user needs an account there and needs to be\nauthenticated against the registry. I could not find a way how to configure\nthe registry credentials via the .gitlab.yml file, so I ssh'ed into the\nboxes and manually run a \"docker login registry.loc\" for the gitlab-runner\nuser. Currently we do not have many servers - virtual machines in our case -\nso that approach is fine, but does not scale in the future.\n\n\nWhen running the container we set a fixed name for the container. That way,\nwe can easily stop it when it comes to the undeploy job. We also define some\nTraefik labels as we use Traefik in front of the docker daemon to route the\nrequests. Traefik itself runs in a container as well. 
The Traefik container\nis launched like this:\n\n\n```html\n\ndocker run -d --restart=always -p 8080:8080 -p 80:80 -p 443:443 \\\n\n-l traefik.enable=false --name=traefik \\\n\n-v /var/run/docker.sock:/var/run/docker.sock \\\n\n-v /etc/traefik/traefik.toml:/etc/traefik/traefik.toml \\\n\n-v /etc/traefik/ssl/cert.key:/etc/traefik/ssl/cert.key \\\n\n-v /etc/traefik/ssl/cert.pem:/etc/traefik/ssl/cert.pem \\\n\ntraefik\n\n```\n\n\nWe do not use any fancy Traefik configuration, just the defaults for the\ndocker backend. Since the Review Apps server runs in our intranet and uses\nour intranet domain name we were not able to use the Let's Encrypt support\nbuilt in Traefik. Instead, we were required to generate a self-signed SSL\ncertificate and mount that in the Traefik container.\n\n\nThe undeploy job is the final piece of the puzzle. GitLab allows you to\nmanually stop Review Apps by clicking a Pause button the GitLab UI. To\nundeploy a Review App we simply stop and remove the container by the defined\nname.\n\n\n```html\n\nundeploy_dev:\n  tags:\n    - dev\n  stage: deploy\n  variables:\n    GIT_STRATEGY: none\n  script:\n    - echo \"Remove review app from dev.loc\"\n    - docker stop reviewapp-demo-$CI_COMMIT_REF_NAME || true\n    - docker rm reviewapp-demo-$CI_COMMIT_REF_NAME || true\n  when: manual\n  environment:\n    name: dev\n    action: stop\n```\n\n\nBoth the deploy_dev and the undeploy_dev job are bound by the tag \"dev\" to\nthe dev server which hosts our docker instances. That way the docker\ninstances will always start on the right server.\n\n\n## About the Author\n\n\n[Stephan Hochdörfer](https://twitter.com/shochdoerfer) currently holds the\nposition of Head of Technology at [bitExpert AG](https://www.bitexpert.de),\na company specializing in software and mobile development. 
His primary focus\nis everything related to web development as well as automation techniques\nranging from code generation to deployment automation.\n\n\n_This post was originally published on\n[blog.bitexpert.de](https://blog.bitexpert.de/blog/dockerizing-gitlab-review-apps/)._\n\n\n[Cover image](https://unsplash.com/@guibolduc?photo=uBe2mknURG4) by\n[Guillaume Bolduc](https://unsplash.com/@guibolduc) on Unsplash\n\n{: .note}\n",[9,109],{"slug":980,"featured":6,"template":700},"dockerizing-review-apps","content:en-us:blog:dockerizing-review-apps.yml","Dockerizing Review Apps","en-us/blog/dockerizing-review-apps.yml","en-us/blog/dockerizing-review-apps",{"_path":986,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":987,"content":993,"config":1000,"_id":1002,"_type":14,"title":1003,"_source":16,"_file":1004,"_stem":1005,"_extension":19},"/en-us/blog/donatinator-open-source-donation-platform",{"title":988,"description":989,"ogTitle":988,"ogDescription":989,"noIndex":6,"ogImage":990,"ogUrl":991,"ogSiteName":686,"ogType":687,"canonicalUrls":991,"schema":992},"The Donatinator: Simple donation solution for charities","This guest author shares his passion project: a free and open source solution for small charities and non-profits to accept donations online.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679940/Blog/Hero%20Images/donatinator-open-source.jpg","https://about.gitlab.com/blog/donatinator-open-source-donation-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The Donatinator: A simple, secure way to accept donations to your charity or non-profit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Chilton\"}],\n        \"datePublished\": \"2019-02-06\",\n      }",{"title":994,"description":989,"authors":995,"heroImage":990,"date":997,"body":998,"category":694,"tags":999},"The Donatinator: A simple, secure way to accept donations to your 
charity or non-profit",[996],"Andrew Chilton","2019-02-06","\n\nMany small non-profits or charity organizations all over the world find it really difficult to accept one-off donations or set up monthly subscriptions online. I know this from firsthand experience.\n\nLast year my wife came to me asking how the organization she volunteers at – a mothers’ support group – could accept donations on their website. My first thought was that the (free) hosting provider they were using would have a feature to do that, but no, not unless you pay, and for a small charity even $10 or $20 per month is too expensive.\n\nMy second thought was to investigate hosting or donation portals. Here's where the journey started.\n\n## Donation platforms offer a mixed bag\n\nAfter looking at various donation platforms, we realised that many differences exist and that you can't always have it all. Some of them:\n\n* Are only http unless you pay up front.\n* Support single donations OR subscriptions, but not both.\n* Are based around a fundraising model (to attain a target amount) but don't support ongoing payments.\n* Are US only, but since we're in New Zealand we needed something that would work here.\n* Provide an iframe payment page but not all.\n* Have a free tier but others required payment from day 1.\n* Don't have a team plan such that members of the charity committee can log in and administer the portal.\n\n## Looking for an open source solution\n\nI kept thinking to myself that there must be an open source project out there already that could do all of this for free. Small charities and non-profits don't have the ability to pay for things up front, especially when it's not part of their core mission. After a while reading, reviewing, comparing, and planning, my non-negotiable for the platform became that \"We didn't have to pay more than necessary.\"\n\nThe only fee we wouldn't be able to get around was credit card processing. 
Added that we would only pay a percentage fee once we receive a donation rather than up front was also a good result.\n\nBeing a coder and being unimpressed with the status quo, I started coding. Within a month [The Donatinator](http://donatinator.org/) ([demo](https://donatinator.herokuapp.com/), [code](https://gitlab.com/donatinator/)) was born.\n\n## The Donatinator\n\nShortly after launch, The Donatinator can already accept one-off or recurring donations, add and edit simple Markdown pages, and allow multiple team members to log in for administration and basic reporting.\n\nMore features are planned, but the most important thing about the project is that it should be guided by a few founding principles. These are (far from perfect, but a good start):\n\n- The software should be open source so it is free for the end user, for now and always.\n- A basic installation should run within the free tier of various hosting providers.\n- The user should only have to pay for credit card processing fees (but if we can get around this one day we will!) 😃\n\n## Why open source?\n\nAllowing anyone and everyone to use, download, install, change, contribute, and enjoy The Donatinator is paramount to enabling every organisation anywhere to accept donations and allow them to continue the great work they are doing and the help they are providing.\n\n### Why GitLab?\n\nSince we ourselves are open source, choosing a code-hosting provider that is also open source aligns with our values nicely. \u003Cplug> GitLab is the natural home of projects like us and we're very grateful of their hospitality (as well as their 2,000 CI pipeline minutes per month!). \u003C/plug>\n\nFunnily enough this also brought home the idea that it's not actually just the technology that is the interesting part of the project. 
GitLab's handbook has a great page on values but a very small part of that is the idea of [boring solutions](https://handbook.gitlab.com/handbook/values/#efficiency) which we're also using to guide our technology decisions, keeping things simple and lite.\n\n### A word on pragmatism\n\nEven though we'd love everything to be open source, we know we can't have everything. With that we'd like to thank the following companies that we're currently using to make The Donatinator fulfill its aim. With free plans on Heroku, Google, Glitch, Zeit, MailGun, and others, we should be able to achieve these goals for charities who may only receive a few donations each month, which can make all the difference between helping people or closing down completely due to insufficient funds. Also thanks to Stripe for having a discounted fee for registered charities to maximize each and every donation.\n\nWhich leads me to a confession ...\n\n## A high high, and a low low\n\nStarting a new project is always exciting. Tap tap, code, test, commit, one late night after another. But then the bad news came ...\n\nThe small charity all of this work was initially done for decided to use an existing donor platform. I can understand why, but rather than dwelling on it, I decided to continue working on The Donatinator anyway. I'm still convinced there is a place for it in the world and a variety of people and organisations can benefit from it, if only they knew about it.\n\n## Asking for help, contributions, and donations\n\nWithout shame I am now asking you all for help. The Donatinator is a new project and there is still lots to do, however there are three main areas in which help would be awesome and greatly appreciated!\n\n### Please contribute!\n\nFirstly, contributions of [code](https://gitlab.com/donatinator/donatinator/) and [documentation](https://gitlab.com/donatinator/docs/) are welcome and very important. 
Participating in the [community](https://spectrum.chat/donatinator) also helps a project thrive and we'd love to chat to you about your needs and requirements.\n\n### Please donate!\n\nSecondly, I'm looking for [patrons and sponsorship](https://donate.donatinator.org/) (yes, it's self hosted) to be able to take the project forward faster. Sustainable open source is still a panacea but I believe it can happen. I don't believe that the charities and non-profits who use The Donatinator should have to pay for the use of it but that means we need to look elsewhere to help with sustainability.\n\n### Please spread the word!\n\nAnd finally but most importantly – users! If there are no users, then there is no project.\n\nIf you know a person, a non-profit, or a charity who could use [The Donatinator](http://donatinator.org/), please get in touch with them. Many are run by non-technical volunteers and they would love to have your help in setting up online donations. Get in touch with us too, for help or if you have any questions – we'd love to hear about your progress and your feedback would be invaluable!\n\n(You could also run The Donatinator yourself for your own open source project or for your own patron portal. Hint hint! 😃)\n\nThere is lots of functionality penciled in for future Donatinator releases but there is nothing like having real users provide ideas or ask for specific features. This is a terrific opportunity to help the helpers ... so come on, let's make it happen! 
We can do this 😃\n\nCover image by [Steve Johnson](https://unsplash.com/photos/0sPFjdcRhko?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/coins?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[697,269,9],{"slug":1001,"featured":6,"template":700},"donatinator-open-source-donation-platform","content:en-us:blog:donatinator-open-source-donation-platform.yml","Donatinator Open Source Donation Platform","en-us/blog/donatinator-open-source-donation-platform.yml","en-us/blog/donatinator-open-source-donation-platform",{"_path":1007,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1008,"content":1014,"config":1023,"_id":1025,"_type":14,"title":1026,"_source":16,"_file":1027,"_stem":1028,"_extension":19},"/en-us/blog/forrester-tei",{"title":1009,"description":1010,"ogTitle":1009,"ogDescription":1010,"noIndex":6,"ogImage":1011,"ogUrl":1012,"ogSiteName":686,"ogType":687,"canonicalUrls":1012,"schema":1013},"Estimate your GitLab ROI with Forrester's economic study","Now available: A new Forrester ROI study and calculator based on real value customers got from using GitLab for SCM, CI, and CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666262/Blog/Hero%20Images/default-blog-image.png","https://about.gitlab.com/blog/forrester-tei","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Discover your GitLab return on investment with the Forrester Total Economic Impact™ Study and Estimator\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Colin Fletcher\"}],\n        \"datePublished\": \"2020-07-29\",\n      }",{"title":1015,"description":1010,"authors":1016,"heroImage":1011,"date":1018,"body":1019,"category":1020,"tags":1021},"Discover your GitLab return on investment with the Forrester Total Economic Impact™ Study and Estimator",[1017],"Colin 
Fletcher","2020-07-29","\n\nWe consistently hear from the global GitLab family (our community, customers, and really anybody interested in GitLab) that they know from experience that GitLab helps them do the work they want to do, faster and better, and that it’s a valuable, even vital, part of their success. But they often have a difficult time describing the value GitLab delivers, especially in specific, quantified ways. We also regularly hear that the hardest part about quantifying \"value\" is knowing where and how to start. \n\n**Enter the Forrester Total Economic Impact™ (TEI) of GitLab: studying real customer experiences**\n \nSo to help everyone better understand the value proposition, GitLab commissioned Forrester Consulting to conduct a [Total Economic Impact™ (TEI) study](/resources/report-forrester-tei/) examining the potential return on investment (ROI) organizations may realize by using GitLab for version control & collaboration (VC&C)/SCM, [continuous integration (CI), and continuous delivery (CD)](/topics/ci-cd/) - all use cases that represent where many teams begin or expand their use of GitLab.  \n\nTo start, GitLab customers were independently interviewed by Forrester Consulting. The interview experiences and any other data collected was then used to create multiple models which in turn generated quantified results based on the combined experiences of all of the customers studied. The data collected, resulting models, and study itself were then reviewed independently by Forrester Research analysts. GitLab stakeholders were also interviewed as part of the data gathering and review process.  
\n\n**Significant results and useful tools to discover your ROI**\n\nJust a sampling of the results realized by the composite organization over an analysis period of three years, based on GitLab customer experiences, yielded these potential, quantifiable benefits in the form of:  \n\n- An overall 407% return on investment (ROI) \n- Improved development and delivery efficiency \n  - Ex. 87% improved development and delivery efficiency (reduced time), resulting in over $23 million in savings \n- Revenue from increased number of releases \n  - Ex. 12x increase in the number of revenue generating application releases in a year, resulting in $12.3 million in additional revenue \n- Improved Code Quality \n  - Ex. 80% reduction in code defects, resulting in over $16.8 million in savings \n- Savings from reducing the number of tools in use \n  - Ex. $3.7 million in savings from using four fewer tools (with their associated costs) each year  \n\nNow these results, while impressive, are based on the experiences of the GitLab customers studied and as with all models, your own unique experience will vary. As such we encourage you to spend time looking over [the study](/resources/report-forrester-tei/) to better understand where the numbers came from and how they may or may not relate to your situation and what you are working to achieve.  \n\nTo help you take the next step of estimating your own potential results, we are thrilled to make available an [online estimator](https://tools.totaleconomicimpact.com/go/gitlab/devopsplatform/index.html) that is based on the TEI study’s models. Enter your own data and you'll get a customized version of the study.  \n\n**Couldn’t have done it without you**\n\nLastly, we want to offer our deepest thanks to the incredibly generous GitLab customers who were willing to share their experiences in this way. They helped all of us in our respective journeys. Thank you! 
\n\n**Get started today!** \n\n- [Download the Forrester Total Economic Impact™ Study commissioned By GitLab, June 2020](/resources/report-forrester-tei/)\n- \u003Ca href=\"https://tools.totaleconomicimpact.com/go/gitlab/devopsplatform/index.html\" target=\"_blank\">Fill out your info in the online estimator and get a custom report based on the TEI study data and models\u003C/a>\n","news",[109,807,830,1020,1022,9],"research",{"slug":1024,"featured":6,"template":700},"forrester-tei","content:en-us:blog:forrester-tei.yml","Forrester Tei","en-us/blog/forrester-tei.yml","en-us/blog/forrester-tei",{"_path":1030,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1031,"content":1037,"config":1042,"_id":1044,"_type":14,"title":1045,"_source":16,"_file":1046,"_stem":1047,"_extension":19},"/en-us/blog/freedesktop-org-migrates-to-gitlab",{"title":1032,"description":1033,"ogTitle":1032,"ogDescription":1033,"noIndex":6,"ogImage":1034,"ogUrl":1035,"ogSiteName":686,"ogType":687,"canonicalUrls":1035,"schema":1036},"Welcome to GitLab, freedesktop.org!","Freedesktop.org, the home of open source desktop technology development, has migrated to GitLab to improve their workflow and modernize their service.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671252/Blog/Hero%20Images/gitlab-desktop-org-cover.png","https://about.gitlab.com/blog/freedesktop-org-migrates-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Welcome to GitLab, freedesktop.org!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-08-20\",\n      }",{"title":1032,"description":1033,"authors":1038,"heroImage":1034,"date":1039,"body":1040,"category":694,"tags":1041},[825],"2018-08-20","\nSorry to [keep banging on about it](/blog/drupal-moves-to-gitlab/), but we get pretty excited when [open source projects](/blog/welcome-gnome-to-gitlab/) tell us 
they’re [#movingtogitlab](/blog/movingtogitlab/). There’s always more room at our inn. So we’re very happy to welcome [freedesktop.org](https://www.freedesktop.org/wiki/) into the fold! We chatted to Daniel Stone, project administrator, about what the project does and why they’re joining us.\n\n## Q & A\n\n- [What is freedesktop.org?](#what-is-freedesktoporg)\n- [How is freedesktop.org used?](#how-is-fdo-used)\n- [What's the connection between freedesktop.org, X Window System, and Linux?](#whats-the-connection-between-fdo-x-window-system-and-linux)\n- [How many contributors work on the project?](#how-many-contributors-work-on-the-project)\n- [Why would someone use freedesktop.org instead of macOS or Microsoft Windows?](#why-would-someone-use-fdo-instead-of-macos-or-microsoft-windows)\n- [Why are you migrating to GitLab?](#why-are-you-migrating-to-gitlab)\n- [How are you anticipating the move to be beneficial?](#how-are-you-anticipating-the-move-to-be-beneficial)\n\n### What is freedesktop.org?\n\nCreated in 2000 by Havoc Pennington (a GNOME developer), freedesktop.org (or fd.o) is a [forge](https://en.wikipedia.org/wiki/Forge_(software))-type hosting site. The idea was to create a neutral collaboration space between [GNOME](/blog/welcome-gnome-to-gitlab/), [KDE](/blog/welcome-kde/), Enlightenment, and other open source desktops. Unlike integrated systems, like Windows and macOS, the open source desktop lacks a lot of shared foundations: what should you open files with, how should you manage windows, and so forth.\n\nOriginally fd.o was a home for these desktop developers to collaborate on common standards, so programs could run portably with the same functionality across different desktops. In 2004, xwin.org was formed by a group of open source graphics developers unhappy with the closed-shop state of the XFree86 project. 
The two projects of fd.o and xwin.org merged shortly after xwin.org’s founding, with fd.o playing host to the X.Org Foundation, which supervises and facilitates the ongoing development of the graphics stack.\n\nOver the years since, our role as a neutral home for all sorts of desktop technology development has seen us add projects such as GStreamer, LibreOffice, and PulseAudio to our diverse family. Some projects such as systemd and Flatpak originally began their development on fd.o, but moved out to other hosting platforms which better suited their needs and workflow.\n\n### How is fd.o used?\n\nMost of our projects are invisible to users: NetworkManager is probably responsible for driving your Wi-Fi under the hood, though you’re unlikely to interact with it directly. Mesa and Wayland/X.Org will provide the underlying plumbing to render your games and your whole UI, but these are mostly invisible. Your desktop probably leans heavily on the D-Bus message-passing system. Most of it is plumbing.\n\n### What's the connection between fd.o, X Window System, and Linux?\n\nAs part of the graphics stack, fd.o hosts the development of the Linux kernel’s graphics development: drivers from all vendors part of the mainstream kernel (and some which aren’t yet!) use our Git hosting, mailing lists, bug tracking, and other services to build the core kernel graphics infrastructure. All this development happens on our infrastructure, which is then fed into the core Linux kernel during its \"merge window\" every release.\n\nThe X.Org Foundation tries to enable the work of a wide body of open source graphics projects. Originally X.Org itself was just the X Window System, but over the years the code evolved out of X.Org into a number of enabling projects. 
These include not just alternative window systems such as Wayland, the Mesa 3D graphics library for hardware-accelerated OpenGL, OpenGL ES and Vulkan, Cairo and Pixman for software rendering, libinput for input device handling, and much more. We play host to all those projects, with the Foundation providing an accountable body for administrative work, conference organization, and so on.\n\nOther freedesktop.org projects, as said before, provide all the glue around the margins of your desktop. Providing a database of available applications and preferred MIME type handlers, network device management, inter-process communication, a PDF renderer; in general, all the things we can do well in one place, to enable people who want to write desktop environments to focus on the thing that matters to them: building the actual desktop!\n\nAs part of this, we’ve always tried to stay strenuously vendor-neutral and also project-neutral within the desktop community. Rather than \"picking winners\" or enforcing directions on external projects, we try to slowly and gently build consensus as a neutral forum.\n\n### How many contributors work on the project?\n\nHard to say! We have around 1,300 registered users who directly commit to our family of projects. Not all of them are active of course, but many developers do not have direct commit access and aren’t represented in that figure. We have around 25,000 people subscribed to our various development mailing lists.\n\n### Why would someone use fd.o instead of macOS or Microsoft Windows?\n\nMuch like GitLab, freedesktop.org is an open source, open-participation, neutral platform. Running an open source desktop through distributions such as Arch, Debian, Fedora, or Ubuntu – all of which use our enabling technology – gives the user a fully open source system. 
This is incredibly empowering: as a user, you have the ability to dive into any part of your system, make the changes you want to see, and participate openly in these projects to see your improvements work upstream.\n\n>As a user, you have the ability to dive into any part of your system, make the changes you want to see, and participate openly in these projects to see your improvements work upstream\n\n### Why are you migrating to GitLab?\n\nOver the years fd.o has been running, we’ve accumulated a wide variety of services: our LDAP-based account system forked back in 2004, Bugzilla for issue tracking, Mailman for mailing lists, cgit and hand-rolled Git hosting, Patchwork for pulling patches from the mailing list when they are submitted for review, Jenkins for build infrastructure, ikiwiki for project wikis, still an FTP server somewhere; the list goes on.\n\nIn terms of workflow, we simply can’t provide some of our projects the workflow they want with this infrastructure. Over the years since we begun, the norm of software development has moved from throwing patches around via email, to fully distributed version control with integrated review and issue tracking, and so on. On paper we provide those services, but integration between them involves a lot of duct tape, and this shows to the users. We saw multiple projects either leave fd.o and move to alternate hosting platforms, or just not develop on our infrastructure to begin with, because we weren’t offering anything like the same level of functionality and convenience as those services.\n\n>Over the years, the norm of software development has moved from throwing patches around via email, to fully distributed version control with integrated review and issue tracking, and so on. 
On paper we provide those services, but integration between them involves a lot of duct tape, and this shows to the users.\n\nOne of the issues with freedesktop.org being such a diverse family, is that there is no central driven organization behind it. The site is currently run by three volunteers, all of whom keep the site running in our spare time. Maintaining all these services – many of them forked to add now-essential features like spam prevention, as well as our own custom local work for service integration – takes a surprising amount of time, to the point where just keeping it running is about all we can do. Actual improvements are very difficult to implement in the time we have, and even when we can do them, making sure all our projects can take full advantage of them is sometimes too much for us.\n\n### How are you anticipating the move to be beneficial?\n\nFirstly, for the workflow, having linked repository management, issue tracking, code review, CI pipelines and feedback, container repositories, wikis, and websites, provides functionality we couldn’t before – or at least, we were providing a pale imitation of it. As all of this is provided in [GitLab Core](/pricing/) and backed by a single coherent permission model, we are able to open these services up to our member projects who can work with them autonomously, rather than waiting for the admins to deal with services for them.\n\nFrom an admin point of view, having a single application which takes care of all of this will drastically reduce the time we spend treading water and dealing with the impedance mismatch between the disparate services we’ve had until now. 
Bringing GitLab up on Kubernetes has not been without its challenges as we attempt to bring our service administration skills up into the 21st century, but already it’s shown us that we can move drastically quicker than we have been able to in the past.\n\n>From an admin point of view, having a single application which takes care of our entire workflow will drastically reduce the time we spend treading water and dealing with the impedance mismatch between the disparate services we’ve had until now\n\nIn terms of service modernization, another huge improvement is a modern approach to identity and security. Running an open community site in 2018 is not a fun place to be: not just keeping on top of security vulnerabilities, but targeted break-in attempts and spam. A lot of our previous services aren’t designed to deal with this kind of abuse. Having a single identity service on GitLab – which can link to external identity providers such as Google and GitLab.com, and make use of two-factor authentication – is a huge leap forward for us. Similarly, a coherent approach to spam which doesn’t involve spending an evening trawling through SQL tables by hand makes dealing with spam actually practical!\f\n\n### How can people get involved?\n\nSince we are an umbrella of diverse projects, there's no single answer. 
We keep a [list of our active projects on our website](https://www.freedesktop.org/wiki/GettingInvolved/): pick the one that's closest to your heart, check out their site and repo, and send your first MR.\n",[697,269,9,918],{"slug":1043,"featured":6,"template":700},"freedesktop-org-migrates-to-gitlab","content:en-us:blog:freedesktop-org-migrates-to-gitlab.yml","Freedesktop Org Migrates To Gitlab","en-us/blog/freedesktop-org-migrates-to-gitlab.yml","en-us/blog/freedesktop-org-migrates-to-gitlab",{"_path":1049,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1050,"content":1056,"config":1063,"_id":1065,"_type":14,"title":1066,"_source":16,"_file":1067,"_stem":1068,"_extension":19},"/en-us/blog/from-monolith-to-microservices-how-to-leverage-aws-with-gitlab",{"title":1051,"description":1052,"ogTitle":1051,"ogDescription":1052,"noIndex":6,"ogImage":1053,"ogUrl":1054,"ogSiteName":686,"ogType":687,"canonicalUrls":1054,"schema":1055},"From monolith to microservices: How to leverage AWS with GitLab","GitLab recently spent time with Ask Media Group and AWS to discuss how modernizing from self-managed to a cloud native system empowers developers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679645/Blog/Hero%20Images/askmediablog-.jpg","https://about.gitlab.com/blog/from-monolith-to-microservices-how-to-leverage-aws-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"From monolith to microservices: How to leverage AWS with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein Matturro\"}],\n        \"datePublished\": \"2020-03-24\",\n      }",{"title":1051,"description":1052,"authors":1057,"heroImage":1053,"date":1058,"body":1059,"category":694,"tags":1060},[894],"2020-03-24","\n\nAsk Media Group operates over 30 websites and provides enriched search results, articles, galleries, and shopping sites to over 100 million unique visitors each 
month. About two years ago, [Ask Media](https://www.askmediagroup.com/) was looking for ways to grow the business, draw advertisers, and expand its audience. Routine tasks like onboarding developers or releasing software took too long. The monolithic system that was in place had limited capabilities and added financial burdens for services that went unused. \n\nChenglim Ear, principal software engineer at Ask Media, recently sat down with [Trevor Hansen](https://www.linkedin.com/in/startuptrev), solutions architect at AWS, to discuss how adopting GitLab empowered developers to improve the customer experience, release software quicker, and leverage AWS cloud services. \n \n## Building microservices from monoliths\n\nAsk Media was looking to move away from a monolithic system to [microservices](/topics/microservices/) in order to modernize workflow and improve the overall business process. “We wanted to move over to microservices. We wanted to [leverage Kubernetes](/solutions/kubernetes/). It was a new container world that was shaping. When we looked at GitLab, it was very complete in providing what we needed to be able to build images, to run on containers,” according to Chenglim. “That was a very big deciding factor. GitLab had everything that we needed.” \n\nDevelopers can now break services into multiples and develop them independently, own the code, and have full visibility prior to deployment. “We're making the hidden logic transparent and we enable the parts of the logic to be independently developed in parallel. So you can have developers all working on their own, with different skillsets,” Chenglim says. \n\n## Containers, cost, and scalability\n\n“We needed a system that could handle change. When we look at what we did to speed up development, make it simple and transparent, and control the cost, we see a paradigm shift. GitLab gave us push-button releases. 
Docker and Kubernetes enabled us to switch to a microservices architecture and AWS enabled auto scaling,” says Chenglim. “On Amazon, we started building Kubernetes clusters and GitLab became our command and control interface.” \n \n Ask Media was looking for a tool that could scale and grow as needed. Cost, speed, and functionality are the tenets that AWS focuses on providing to its customers, according to Hansen. AWS works closely with Ask Media to ensure that the containers in place offer the scalability, flexibility, and timeliness they need. \n\nWith [GitLab and AWS](/partners/technology-partners/aws/), Ask Media developers built out a platform that enables the knowledge from all members of the teams. “With AWS, we wanted a product that was fairly complete and mature. AWS has a lot of history and lots of services. We definitely wanted to be able to leverage those services and to build on a platform that was a solid,” Chenglim says. “We set off to build Kubernetes clusters, right on EC2 instances. 
We continue to look at opportunities to leverage the resources available through AWS.”\n\nTo learn more about how Ask Media made the transition to cloud native, check out the full [webcast](/webcast/cloud-native-transformation/).\n\nCover image by [Eric Muhr](https://unsplash.com/@ericmuhr?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}",[1061,9,784,1062],"webcast","UI",{"slug":1064,"featured":6,"template":700},"from-monolith-to-microservices-how-to-leverage-aws-with-gitlab","content:en-us:blog:from-monolith-to-microservices-how-to-leverage-aws-with-gitlab.yml","From Monolith To Microservices How To Leverage Aws With Gitlab","en-us/blog/from-monolith-to-microservices-how-to-leverage-aws-with-gitlab.yml","en-us/blog/from-monolith-to-microservices-how-to-leverage-aws-with-gitlab",{"_path":1070,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1071,"content":1077,"config":1083,"_id":1085,"_type":14,"title":1086,"_source":16,"_file":1087,"_stem":1088,"_extension":19},"/en-us/blog/git-wars-switching-to-gitlab",{"title":1072,"description":1073,"ogTitle":1072,"ogDescription":1073,"noIndex":6,"ogImage":1074,"ogUrl":1075,"ogSiteName":686,"ogType":687,"canonicalUrls":1075,"schema":1076},"Git Wars: Why I'm switching to GitLab","New GitLab user Christopher Watson puts us through our paces and weighs up his Git hosting options.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680411/Blog/Hero%20Images/git-wars-switching-to-gitlab.jpg","https://about.gitlab.com/blog/git-wars-switching-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Git Wars: Why I'm switching to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christopher Watson\"}],\n        \"datePublished\": \"2017-07-19\",\n      
}",{"title":1072,"description":1073,"authors":1078,"heroImage":1074,"date":1080,"body":1081,"category":741,"tags":1082},[1079],"Christopher Watson","2017-07-19","\n\nIt’s a well-known fact: GitHub has the market share when it comes to Git hosting, with Bitbucket following close behind due to their “unlimited private repositories” policy. But what if I told you that those weren’t your only options?\n\n\u003C!-- more -->\n\nNow I have nothing against GitHub. It’s a great tool and I’ve been using it for years. It’s built primarily in a framework I love, Ruby on Rails, and its design is one we’ve all come to tolerate (if not love). With GitHub controlling most of the market share, most open source projects have also found a home there.\n\nAll of this being said, I’d be lying if I said that GitHub didn’t have its downsides. If you want private repositories, it’s going to cost you a pretty penny at $25 per month for your first five users, and then $9 per user after that. For comparison's sake, if you have 10 users in your organization it’s going to cost you $80 a month, and that’s a pretty small team. GitHub also has a sordid history when it comes to communicating with the community, implementing new features, and updating its somewhat dated look.\n\nBitbucket is another beast altogether. Created by the tech conglomerate Atlassian, Bitbucket is their answer to GitHub. Its claim to fame is that you can have unlimited private repositories for free…as long as your team has no more than five people.\n\nThis is actually the reason I first started using Bitbucket. However, if you do have more than five people, you’re going to pay, and the quality that you pay for isn’t all that great. I’ve worked on a number of projects on Bitbucket where my team was plagued by slow pulls/pushes, 503 errors when attempting to view a repo, and just overall jankyness. Besides that, their design also leaves a lot to be desired. 
The new design looks better, but also somehow makes things even more confusing (someone needs to learn the difference between UI and UX). To be completely honest I don’t have anything good to say about Bitbucket, so I’m going to continue.\n\n### So what’s this GitLab thing? Is it the answer to all of our Git hosting woes?\n\nWell yes and no. GitLab is a very good product, but it’s not perfect. It certainly isn’t as fast as GitHub when it comes to pushing and pulling repos. That being said, here are my reasons for switching to GitLab for my personal projects: GitLab is a Git hosting solution with a very large toolset and, objectively, a beautifully designed website (could it still use some work? Yes, but I digress). It is completely free for unlimited users, unlimited private repositories, and full access to most of the awesome features they provide.\n\nYes, they still have paid tiers for the [enterprise](/enterprise/). You can’t expect them to keep an awesome project like this going without some kind of monetization, but for us little guys you’ll most likely never have to pay. That has got to be music to your ears.\n\n### So you said it has “awesome features.” To what are you referring?\n\nWell, there’s quite a list. Let’s take a look:\n\n1. **Syntax themes!** In case you didn’t get that, I’ll say it again: syntax themes! This has been something that I have been waiting a long time for GitHub to come out with, but GitLab beat them to the punch. We’re still probably a long way away from having custom themes, but the ability to have a dark theme when checking diffs in the browser is awesome.\n\n1. **Registry:** GitLab also has a built-in Docker registry for your projects. This is an amazingly powerful feature for those that want to keep their containers off of the public registry at hub.docker.com, but don’t want to pay for a private service.\n\n1. 
**Pipelines/GitLab CI:** Continuous integration is a huge time saver and a great way to make sure a pull request isn’t going to break your app. GitLab saves you from having to use an external CI service by having their own CI built right in. Not to say you can’t use an external CI if you want; GitLab has integrations for Jenkins, Bamboo, and much more.\n\n1. **3rd Party Integrations:** As mentioned above, GitLab has 3rd party integrations for several services such as CI, code coverage, messaging, etc. Their Slack integration is great for notifying your team when stuff has been merged into master. I will be honest though, I am sure GitHub has more integrations.\n\n1. **All the features that make GitHub great:** GitLab also ships with Wikis, Markdown-based readmes, etc. You don’t really lose any features by switching, but you gain a ton.\n\n![screengrab](https://about.gitlab.com/images/blogimages/git-wars-2.png){: .shadow}\u003Cbr>\n\n### So if it’s so great, why isn’t everyone using it?\n\nThere are a couple of answers to that question. First off, you have the market share factor. GitHub was one of the first Git hosting providers to market and they’ve managed to hold onto that. That means that if you want people to contribute to your project, it helps to have it on GitHub because chances are the people that you want to contribute have an account.\n\nThe other answer is related. Comfortability. People are simply comfortable with the tool they know and a lot of people aren’t like me (willing to throw everything out the window because I truly believe that the better product should get my business). This is the same reason so many people are still using Atlassian products. It’s definitely not because of their user interfaces.\n\n### So where should I go from here?\n\nThat depends on you. If you’re comfortable getting to know a new way of doing things, I’d suggest you take a look at GitLab. 
It really is worth the time you’ll put into it.\n\nIf you’re already *comfortable* then go ahead and stick with what you know, but at least now you know that there are alternatives.\n\n## About the Author\n\n[Chris Watson](https://twitter.com/idev0urer) is a freelance full-stack developer who occasionally enjoys sharing some of his many opinions with the world. He and his wife currently reside in sunny Arizona.\n\n_This post was originally published on [blog.cwatsondev.com](https://blog.cwatsondev.com/git-wars-why-im-switching-to-gitlab/)._\n\n“[paper battle](https://www.flickr.com/photos/die_ani/9024130/)” by [anika](https://www.flickr.com/photos/die_ani/) is licensed under [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/)\n{: .note}\n",[743,697,9],{"slug":1084,"featured":6,"template":700},"git-wars-switching-to-gitlab","content:en-us:blog:git-wars-switching-to-gitlab.yml","Git Wars Switching To Gitlab","en-us/blog/git-wars-switching-to-gitlab.yml","en-us/blog/git-wars-switching-to-gitlab",{"_path":1090,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1091,"content":1097,"config":1103,"_id":1105,"_type":14,"title":1106,"_source":16,"_file":1107,"_stem":1108,"_extension":19},"/en-us/blog/gitlab-and-reproducibility",{"title":1092,"description":1093,"ogTitle":1092,"ogDescription":1093,"noIndex":6,"ogImage":1094,"ogUrl":1095,"ogSiteName":686,"ogType":687,"canonicalUrls":1095,"schema":1096},"How GitLab can help in research reproducibility","NYU reproducibility librarian Vicky Steeves shares why GitLab is her choice for ongoing collaborative research, and how it can help overcome challenges with sharing code in academia.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672928/Blog/Hero%20Images/gitlab-and-reproducibility.jpg","https://about.gitlab.com/blog/gitlab-and-reproducibility","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab can help in 
research reproducibility\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vicky Steeves\"}],\n        \"datePublished\": \"2017-08-25\",\n      }",{"title":1092,"description":1093,"authors":1098,"heroImage":1094,"date":1100,"body":1101,"category":694,"tags":1102},[1099],"Vicky Steeves","2017-08-25","GitLab is a great platform for active, ongoing, collaborative research. It\nenables folks to work together easily and share that work in the open. This\nis especially poignant given the problems in sharing code in academia,\nacross time and people.\n\n\n\u003C!-- more -->\n\n\n![phd-code-comic](https://phdcomics.com/comics/archive/phd031214s.gif)\n\n\nIt's no surprise that GitLab, a platform for collaborative coding and Git\nrepository hosting, has features for reproducibility that researchers can\nleverage for their own and their communities’ benefit.\n\n\n### What exactly is reproducibility?\n\n\nReproducibility is a core component in a variety of work, from software\nengineering to research. For software engineers, the ability to reproduce\nerrors or functionality is key to development. For researchers,\nreproducibility is about independent verification of results/methods, to\nbuild on top of previous work, and to increase the impact, visibility, and\nquality of research. Y’know. That Sir Isaac Newton quote in every\nreproducibility presentation ever: \"If I have seen further, it is by\nstanding on the shoulders of giants.\"\n\n\nLike all things, reproducibility exists on a spectrum. 
I like Stodden et\nal’s definitions from the [2013 ICERM\nreport](http://stodden.net/icerm_report.pdf), so I’ll use those:\n\n\n| ICERM Report\nDefinitions\n| Potential Real-World\nExamples\n|\n\n|:-----------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------|\n\n| Reviewable Research: Sufficient detail for peer review and\nassessment                            | The code and data are openly\navailable\n|\n\n| Replicable Research: Tools are available to duplicate the author’s results\nusing their data    | The tools (software) used in the analysis are freely\navailable for others to confirm results                                   |\n\n| Confirmable Research: Main conclusions can be attained independently\nwithout author’s software | Others can reach the conclusion using similar\ntools, not necessarily the same as the author, or on a different operating\nsystem |\n\n| Auditable Research: Process and tools archived such that it can be\ndefended later if necessary   | The tools, environment, data, and code are\nput into a preservation-ready\nformat                                                |\n\n| Open/Reproducible Research: Auditable research made openly\navailable                           | Everything above is made available in\na repository for others to examine and\nuse                                               |\n\n\nThe last bullet there is the goal – open and reproducible research.\nReleasing code and data are key to open research, but not necessarily enough\nfor reproducibility. 
This is where the concept of computational\nreproducibility becomes important, where whole environments are captured.\nYou could also look at it this way:\n\n\n![reproducibility-pyramid](https://osf.io/8rx9y/download)\n\n\n### How can GitLab help?\n\n\nThere are a few solutions out there, including containers (such as Docker or\nSingularity) for active research, and [o2r](http://o2r.info/) and\n[ReproZip](https://reprozip.org) for capturing and reproducing completed\nresearch. For this post, I’m going to focus on active research and\ncontainers.\n\n\nI like GitLab for research reproducibility because it makes working together\nsimple, and seamless. There’s no hacking together 100 different third-party\nservices. GitLab has hosting, LFS, and integrated Continuous Integration for\nfree, for both public and private repositories! Everything is integrated in\na single GitLab repository which, if made publicly available, can enable\nsecondary users to reproduce results in a more streamlined fashion. You can\nalso keep these private to a group – you control the visibility of\neverything in one repository in one place, as opposed to updating\npermissions across multiple services.\n\n\nThere are a few key features that set GitLab apart when it comes to\ncontainers and reproducibility. The first is that GitLab doesn’t use a\nthird-party service for continuous integration. It’s shipped with CI runners\nwhich can use Docker images from GitLab’s registry. 
Basically, you can use\nthe Docker Container Registry, a secure, private Docker registry, to choose\na container that GitLab CI uses to run each job in a separate and isolated\ncontainer.\n\n\n![gitlab-ci-repro](https://about.gitlab.com/images/ci/arch-1.jpg)\n\n\nIf you don’t feel like using the GitLab registry, you can also use images\nfrom DockerHub or a custom Docker container you’re already using locally.\nThese can be integrated with GitLab CI, and if made public, any secondary\nusers can use it as well!\n\n\n### Let's look at an example\n\n\nThis process is set up in a single file, a `.gitlab-ci.yml`. Another feature\nthat makes my life easier – GitLab can syntax-check the CI config files! The\n`.gitlab-ci.yml` file describes the pipelines and stages, each of which has\na different function and can have its own tags, produce its own artifacts,\nand reuse artifacts from other stages. These stages can also run in parallel\nif needed. Here’s an example of what a basic config file looks like with R:\n\n\n```\n\nimage: jangorecki/r-base-dev\n\ntest:\n  script:\n    - R CMD build . --no-build-vignettes --no-manual\n    - PKG_FILE_NAME=$(ls -1t *.tar.gz | head -n 1)\n    - R CMD check \"${PKG_FILE_NAME}\" --no-build-vignettes --no-manual --as-cran\n```\n\n\nAnd here’s an example of building a website using the GitLab and the static\nsite generator, Nikola:\n\n\n```\n\nimage: registry.gitlab.com/paddy-hack/nikola:7.8.7\n\ntest:\n  script:\n  - nikola build\n  except:\n  - master\n\npages:\n  script:\n    - nikola build\n  artifacts:\n    paths:\n    - public\n  only:\n  - master\n```\n\n\nIt’s also worth noting that you can use different containers per step in\nyour workflow, if you outline it in your .gitlab-ci.yml. If your data\ncollection script runs in one environment but your analysis script needs\nanother, that’s perfectly fine using GitLab, and others have the information\nto reproduce it easily! 
Another feature that sets GitLab apart is that a\nbuild of one project can trigger a build of another – AKA, multi-project\npipelines. For those of you working with big data, you can automatically\nspin up and down VMs to make sure your builds get processed immediately with\nGitLab’s CI as well.\n\n\nHere are some other great resources and examples of using GitLab to make\nresearch more reproducible:\n\n\n+ [Gitlab-CI for R packages](https://gitlab.com/jangorecki/r.gitlab.ci)\n\n+ [Blog Post explaining GitLab + reproducibility - Jon\nZelner](http://www.jonzelner.net/statistics/make/docker/reproducibility/2016/05/31/reproducibility-pt-1/)\n\n+ [GitLab repo accompanying blog post - Jon\nZelner](https://gitlab.com/jzelner/reproducible-stan)\n\n+ [Continuous Integration with Gitlab - Tony\nWildish](https://www.nersc.gov/assets/Uploads/2017-02-06-Gitlab-CI.pdf)\n\n\nBeyond reproducibility, there are a lot of features that make GitLab an\nideal place for me to work and organize my research. I’d urge folks to look\nat the [feature list](/pricing/feature-comparison/) and see how they can get started!\n\n\n## About the Guest Author\n\n\nVicky Steeves is the Librarian for Research Data Management and\nReproducibility at New York University, a dual appointment between the\nDivision of Libraries and Center for Data Science. 
In this role, she works\nsupporting researchers in creating well-managed, high quality, and\nreproducible research through facilitating use of tools such as ReproZip.\nHer research centers on integrating reproducible practices into the research\nworkflow, advocating openness in all facets of scholarship, and\nbuilding/contributing to open infrastructure.\n\n\n“[research](https://www.flickr.com/photos/alovesdc/3464555556/)” by [a loves\ndc](https://www.flickr.com/photos/alovesdc/) is licensed under [CC BY\n2.0](https://creativecommons.org/licenses/by/2.0/legalcode)\n\n{: .note}\n",[697,807,9],{"slug":1104,"featured":6,"template":700},"gitlab-and-reproducibility","content:en-us:blog:gitlab-and-reproducibility.yml","Gitlab And Reproducibility","en-us/blog/gitlab-and-reproducibility.yml","en-us/blog/gitlab-and-reproducibility",{"_path":1110,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1111,"content":1117,"config":1124,"_id":1126,"_type":14,"title":1127,"_source":16,"_file":1128,"_stem":1129,"_extension":19},"/en-us/blog/gitlab-for-education-student-spotlights",{"title":1112,"description":1113,"ogTitle":1112,"ogDescription":1113,"noIndex":6,"ogImage":1114,"ogUrl":1115,"ogSiteName":686,"ogType":687,"canonicalUrls":1115,"schema":1116},"Apply to be featured as a GitLab Student Spotlight","Feature your work on GitLab.com and get GitLab swag!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664102/Blog/Hero%20Images/gitlab-values-cover.png","https://about.gitlab.com/blog/gitlab-for-education-student-spotlights","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Apply to be featured as a GitLab Student Spotlight\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christina Hupy, Ph.D.\"}],\n        \"datePublished\": \"2020-06-17\",\n      
}",{"title":1112,"description":1113,"authors":1118,"heroImage":1114,"date":1120,"body":1121,"category":718,"tags":1122},[1119],"Christina Hupy, Ph.D.","2020-06-17","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n\nCalling all Students, Professors, and participants in the [GitLab Education Program](https://about.gitlab.com/solutions/education/): [Apply to the GitLab Student Spotlights](https://docs.google.com/forms/d/e/1FAIpQLSdHzG0IEDw6VlUQDqWsweNRDIdM2HQpoBH-t2OzK1m0_SMeiQ/viewform?usp=sf_link), have your work featured on GitLab.com, and earn some GitLab Swag!\n\nWe want to know: how are you using GitLab for teaching, learning, or research?\n\n## GitLab for Education\n\nSince 2018, the [GitLab for Education](https://about.gitlab.com/solutions/education/) program has been providing free Gold subscriptions and Ultimate licenses to qualifying institutions for the purpose of teaching and learning. In two years, we’ve already provided licenses to over 800 educational institutions worldwide, with a total of 1.6+ million participants!\n\nAt GitLab, we believe that [Everyone can Contribute](https://about.gitlab.com/community/contribute/) - and we want to learn more about how our education communities around the world are contributing to and using GitLab.\n\n## What are Student Spotlights?\n\nThe Student Spotlights program will highlight amazing projects that students and professors are building and creating using GitLab.\n\nSelected projects will be featured via a video-recorded interview with the students and professors (if available) involved. Interviews and project links will be featured on the GitLab Unfiltered YouTube channel and the GitLab for Education main webpage!\n\nThe aim of Student Spotlights is two-fold. By showcasing the great work of our program participants, we want to inspire others to join and share their GitLab projects. 
Your stories and your work are the best way to share and spread the word!\n\nWe also want to connect and build relationships with institutions already using GitLab for teaching and learning. Our student spotlight participants will be some of the first engaged in our quickly growing education community!\n\n## Who Qualifies\n\nStudents, professors, researchers, and leaders in education using GitLab for Education licenses are eligible to submit projects to the Student Spotlights program.\n\n### Examples of Projects for Student Spotlights\n\n- Students using GitLab to host a blog or portfolio\n- Hosting a coding project using a GitLab repository\n- Managing a student club or project using GitLab issues\n- And more! We want to see all the creative, challenging, and dynamic ways you’re using GitLab.\n\n## Apply for the Student Spotlights Program\n\n- [Submit the Google Form to apply to be featured as a Student Spotlight](https://docs.google.com/forms/d/e/1FAIpQLSdHzG0IEDw6VlUQDqWsweNRDIdM2HQpoBH-t2OzK1m0_SMeiQ/viewform?usp=sf_link)\n- Project applications will be reviewed by the Developer Relations team within 1 week of receiving your application.\n- If your project is chosen, you will be contacted via email to select a time for your video-recorded interview.\n- Christina Hupy, Program Manager of the GitLab Education program, or Samantha Lee, GitLab Community Advocate, will host a 15 minute recorded video interview with each participant to highlight their projects and share how they use GitLab for Education\n- Recorded interviews will be uploaded to GitLab Unfiltered and projects will be featured on the GitLab.com GitLab for Education page\n\n\nHave questions about GitLab Student Spotlights, or the GitLab for Education program? Email us at education@gitlab.com and let us know. 
We can't wait to see how you are using GitLab for Education!\n",[269,1123,807,9],"production",{"slug":1125,"featured":6,"template":700},"gitlab-for-education-student-spotlights","content:en-us:blog:gitlab-for-education-student-spotlights.yml","Gitlab For Education Student Spotlights","en-us/blog/gitlab-for-education-student-spotlights.yml","en-us/blog/gitlab-for-education-student-spotlights",{"_path":1131,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1132,"content":1138,"config":1145,"_id":1147,"_type":14,"title":1148,"_source":16,"_file":1149,"_stem":1150,"_extension":19},"/en-us/blog/gitlab-heroes-unmasked-elevating-my-company-using-gitlab",{"title":1133,"description":1134,"ogTitle":1133,"ogDescription":1134,"noIndex":6,"ogImage":1135,"ogUrl":1136,"ogSiteName":686,"ogType":687,"canonicalUrls":1136,"schema":1137},"GitLab Heroes Unmasked: How I am elevating my company using GitLab","Tickett Enterprises Limited Director Lee Tickett shares the details of his ongoing journey to use the DevOps platform to its fullest.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667569/Blog/Hero%20Images/heroestickett.jpg","https://about.gitlab.com/blog/gitlab-heroes-unmasked-elevating-my-company-using-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Heroes Unmasked: How I am elevating my company using GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lee Tickett\"}],\n        \"datePublished\": \"2022-05-12\",\n      }",{"title":1133,"description":1134,"authors":1139,"heroImage":1135,"date":1141,"body":1142,"category":1143,"tags":1144},[1140],"Lee Tickett","2022-05-12","_A key to GitLab’s success is our vast community of advocates. Here at\nGitLab, we call these active contributors [\"GitLab\nHeroes\"](/community/heroes/). 
Each hero contributes to GitLab in numerous\nways, including elevating releases, sharing best practices, speaking at\nevents, and more. The \"GitLab Heroes Unmasked\" series is dedicated to\nsharing their stories._\n\n\nLee Tickett, director at IT development and support consultancy Tickett\nEnterprises Limited, is a GitLab hero and Core team member who continuously\ncontributes to GitLab and provides exceptional feedback. In late 2020, he\n[wrote a blog](/blog/lee-tickett-my-gitlab-journey/) about how he\ncame upon GitLab and began to use it as his company's platform.\n\n\nAt that point, his company was using GitLab in the following ways:\n\n\n- for version control\n\n- with a custom merge request approval process\n\n- as a custom UI for streamlined/standardized project creation\n\n- as an integration with our bespoke helpdesk platform\n\n- as a Windows runner with fairly basic CI\n\n\nThis blog picks up where that blog left off and gives insight into how\nTickett Enterprises is making the most of GitLab's One DevOps Platform for\nits helpdesk, CRM integration, CI/CD, and more.\n\n\n## Migrating the helpdesk\n\n\nQuite some time ago, I decided to migrate from the bespoke helpdesk platform\nand use GitLab for issue tracking. Here's [an\nepic](https://gitlab.com/groups/gitlab-org/-/epics/5323) I created just over\ntwo years ago to start discussing my plans.\n\n\nI built a bespoke migration tool using C#, which connects directly to the\nexisting\n\nhelpdesk database and pushes the data into GitLab using the API. 
This\nincludes:\n\n\n- groups (each company in our helpdesk will become a group in GitLab with a\nsingle `Helpdesk` project)\n\n- issues (every ticket in our helpdesk will become an issue in GitLab,\nestimates will be included and quotes converted to weights)\n\n- notes\n\n- attachments\n\n- time logs\n\n- labels (type, class, department, and \"status\" will be migrated to labels)\n\n\n### Helpdesk workflow\n\n\nAfter discussing different approaches with the GitLab team and the\ncommunity, we came up with the first iteration of our workflow process. The\nstatus of tickets in our helpdesk system becomes scoped labels in GitLab. It\nlooks similar to the following:\n\n\n![Workflow Issue\nBoard](https://about.gitlab.com/images/blogimages/workflow-issue-board.png)\n\n\nWe have two relatively small teams so we can also leverage boards to\ndistribute and manage\n\nwork within the team:\n\n\n![Department Issue\nBoard](https://about.gitlab.com/images/blogimages/department-issue-board.png)\n\n\nWe will be leveraging the [GitLab\nTriage](https://gitlab.com/gitlab-org/ruby/gems/gitlab-triage)\n\nRubyGem and [Triage Ops](https://gitlab.com/gitlab-org/quality/triage-ops)\nproject to handle\n\nreactive and scheduled automation, such as: \n\n\n- opening pending issues once they reach their due date (this field has been\nslightly repurposed)\n\n- nudging users when issues have been pended, but no due date has been\nassigned\n\n- nudging the team when issues have not been triaged (labeled,\nestimates/quotes attached, etc.)\n\n\nGitLab triage will run as a scheduled pipeline from inside of GitLab, and\nTriage Ops (formerly known as Triage Serverless) will run as webhooks in AWS\nLambda (triggered by webhooks). We may potentially transition some of our\nexisting customizations from C# to GitLab Triage/Triage Ops, too. \n\n\n## Building out CRM\n\n\nOne of the biggest challenges moving our helpdesk over to GitLab was the\ninability to tie issues to Customers. 
So, roughly a year ago, I decided to\nstart building out a [Customer Relations\nManagement](https://docs.gitlab.com/ee/user/crm/) (CRM) feature. \n\n\nYou can see some of the work that has gone into the CRM so far: [CRM Merged\nMRs](https://gitlab.com/gitlab-org/gitlab/-/merge_requests?scope=all&state=merged&label_name[]=CRM).\n\n\nIt’s surprising how much work is needed for what seems like a mostly simple\nfeature. Despite careful planning, there were many surprises that caused\nsignificant headaches. I was hoping to formally release this in December\n2021, but it looks like June 2022 is more feasible now.\n\n\n### Reporting\n\n\nCompared to our previous bespoke SQL Server Reporting Services (SSRS) report\nsuite pulling directly from our helpdesk, reporting is very limited. We\ntried using SSRS with a SQL Server linked to our GitLab Postgres server, but\nkept hitting walls. We are now moving forward using Google Data Studio (with\na direct database connection).\n\n\nAlthough we still have a way to go, we've managed to achieve some really\ngreat results.\n\n\n![Scheduled Pipelines\nReport](https://about.gitlab.com/images/blogimages/scheduled-pipelines-report.png)\n\n\nHere's an example of a report we've started to build to increase the\nvisibility of our scheduled interfaces now that we're leveraging CI/CD more.\n\n\n### Challenges\n\n\nOne obstacle we were faced with was the inability to achieve a lot of our\ngoals at the instance level. Some GitLab functionality is at the project\nlevel, some at the group, and some at an instance. As a result, we had to\ncreate a temporary single root group and create all groups beneath it. 
\n\n\n## Moving to Linux/Docker for CI/CD pipelines\n\n\nWe have almost moved completely to Linux/Docker for our CI/CD pipelines,\nusing several custom images:\n\n\n- our [custom .NET image](https://gitlab.com/tickett/dotnet.core.selenium)\nsimply adds chromedriver to the default\n`mcr.microsoft.com/dotnet/core/sdk:latest` image to add Selenium support for\nUI testing\n\n- our [custom Android/Gradle\nimage](https://gitlab.com/tickett/docker-android-gradle) provides a stable\nbuild environment for our Clover apps (which require v1 APK signing no\nlonger supported in Android Studio).\n\n\nYou can see sample `.gitlab-ci.yml` templates in the relevant projects.\n\n\nWe now have our test summary and [coverage\nvisualization](https://docs.gitlab.com/ee/ci/testing/test_coverage_visualization.html)\ndisplayed in merge requests, which is a total game changer! \n\n\n## GitLab for intranet\n\n\nWe've been using SharePoint for as long as I can remember, and I'm not a\nfan.\n\n\nAs great as a WYSIWYG interface is, I believe it brings with it:\n\n\n- a lack of consistency\n\n- a pretty awful audit trail\n\n- no review/approval process\n\n\nSo let's try and learn from the best. Can we use GitLab pages? Absolutely!\n\n\nWe picked Hugo purely as it seems the most popular (most forked GitLab pages\nproject template). Similarly, the [Relearn\ntheme](https://themes.gohugo.io/themes/hugo-theme-relearn/) seems to be the\nmost popular for docs. 
\n\n\nIt's still a work in progress, but we’re exploring a structure similar to:\n\n\n```text\n\nClients\n\n-Client A\n\n--System A\n\n--System B\n\n-Client B\n\n--System C\n\n--System D\n\nInternal\n\n-Process A\n\n-Process B\n\n```\n\n\nNot too dissimilar to GitLab, but hugely amplified, we want to pull multiple\nprojects, not just our Hugo repo.\n\n\nThe following  is our `.gitlab-ci.yml`:\n\n\n```yaml\n\nimage: registry.gitlab.com/pages/hugo:latest\n\nvariables:\n GIT_SUBMODULE_STRATEGY: recursive\ngrab-docs:\n tags:\n   - docker\n image:\n   name: ruby:2.7.5-slim\n script:\n   - cd ${CI_PROJECT_DIR}\n   - gem install gitlab\n   - ruby grab_docs.rb\n artifacts:\n   untracked: true\n\ntest:lint:\n tags:\n   - docker\n image:\n   entrypoint: [\"\"]\n   name: davidanson/markdownlint-cli2\n script:\n   - cp $MARKDOWN_LINT_CONFIG ./.markdownlint-cli2.jsonc\n   - markdownlint-cli2 \"content/**/*.md\"\n needs:\n   - grab-docs\n\ntest:\n tags:\n   - docker\n script:\n   - apk add --update --no-cache git\n   - hugo\n except:\n   - master\n needs:\n   - test:lint\n\npages:\n tags:\n   - docker\n script:\n   - apk add --update --no-cache git\n   - hugo\n artifacts:\n   paths:\n     - public\n only:\n   - master\n needs:\n   - grab-docs\n   - test:lint\n```\n\n\nThe first `grab-docs` step runs a custom Ruby script to:\n\n\n- interrogate our GitLab instance, looping through all groups and projects\n\n- grab the `README.md` and `/doc` folder\n\n- add frontmatter for last update date and link to the repo \n\n- update and fix all markdown paths\n\n\n```ruby\n\n#!/usr/bin/env ruby\n\n\nrequire 'fileutils'\n\nrequire 'gitlab'\n\n\n$api = Gitlab.client(endpoint: ENV['PRODUCTION_API_ENDPOINT'],\nprivate_token: ENV['GITLAB_API_TOKEN'].to_s)\n\n$projects = $api.projects(per_page: 50)\n\n\ndef grab_files(project)\n file = $api.file_contents(project.id, 'README.md')\n return unless 
file&.start_with?('\n","devsecops",[269,856,9],{"slug":1146,"featured":6,"template":700},"gitlab-heroes-unmasked-elevating-my-company-using-gitlab","content:en-us:blog:gitlab-heroes-unmasked-elevating-my-company-using-gitlab.yml","Gitlab Heroes Unmasked Elevating My Company Using Gitlab","en-us/blog/gitlab-heroes-unmasked-elevating-my-company-using-gitlab.yml","en-us/blog/gitlab-heroes-unmasked-elevating-my-company-using-gitlab",{"_path":1152,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1153,"content":1159,"config":1166,"_id":1168,"_type":14,"title":1169,"_source":16,"_file":1170,"_stem":1171,"_extension":19},"/en-us/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss",{"title":1154,"description":1155,"ogTitle":1154,"ogDescription":1155,"noIndex":6,"ogImage":1156,"ogUrl":1157,"ogSiteName":686,"ogType":687,"canonicalUrls":1157,"schema":1158},"Go tools and GitLab: How to do continuous integration like a boss","How the team at Pantomath makes their lives easier with GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667978/Blog/Hero%20Images/go-tools-and-gitlab.jpg","https://about.gitlab.com/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Go tools and GitLab: How to do continuous integration like a boss\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Julien Andrieux\"}],\n        \"datePublished\": \"2017-11-27\",\n      }",{"title":1154,"description":1155,"authors":1160,"heroImage":1156,"date":1162,"body":1163,"category":741,"tags":1164},[1161],"Julien Andrieux","2017-11-27","At [Pantomath](https://pantomath.io/), we use [GitLab](/) for all our\ndevelopment work. The purpose of this paper is not to present GitLab and all\n[its features](/pricing/feature-comparison/), but to introduce how we use these tools to ease\nour lives. So what is it all about? 
To automate everything that is related\nto your development project, and let you focus on your code.\n\n\n\u003C!-- more -->\n\n\nWe’ll cover the [lint](https://en.wikipedia.org/wiki/Lint_(software)), [unit\ntests](https://en.wikipedia.org/wiki/Unit_testing), [data\nrace](https://en.wikipedia.org/wiki/Race_condition), [memory\nsanitizer](https://clang.llvm.org/docs/MemorySanitizer.html), [code\ncoverage](https://en.wikipedia.org/wiki/Code_coverage), and build.\n\n\nAll the source code shown in this post is available at\n[gitlab.com/pantomath-io/demo-tools](https://gitlab.com/pantomath-io/demo-tools).\nSo feel free to get the repository, and use the tags to navigate in it. The\nrepository should be placed in the `src` folder of your `$GOPATH`:\n\n\n```bash\n\n$ go get -v -d gitlab.com/pantomath-io/demo-tools\n\n$ cd $GOPATH/src/gitlab.com/pantomath-io/demo-tools\n\n```\n\n\n### Go tools\n\n\nLuckily, `Go` — the open source programming language also known as golang —\ncomes with a [lot of useful tools](https://golang.org/cmd/go/), to build,\ntest, and check your code. In fact, it’s all there. We’ll just add extra\ntools to glue them together. But before we go there, we need to take them\none by one, and see what they do.\n\n\n#### Package list\n\n\nYour Go project is a collection of packages, as described in the [official\ndoc](https://golang.org/doc/code.html). Most of the following tools will be\nfed with these packages, and thus the first command we need is a way to list\nthe packages. Hopefully, the `Go` language covers our back with the `list`\nsubcommand ([read the fine\nmanual](https://golang.org/cmd/go/#hdr-List_packages) and this [excellent\npost from Dave\nCheney](https://dave.cheney.net/2014/09/14/go-list-your-swiss-army-knife)):\n\n\n```bash\n\n$ go list ./...\n\n```\n\n\nNote that we want to avoid applying our tools on external packages or\nresources, and restrict it to **our** code. 
So we need to get rid of the\n[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories):\n\n\n```bash\n\n$ go list ./... | grep -v /vendor/\n\n```\n\n\n#### Lint\n\n\nThis is the very first tool we use on the code: the linter. Its role is to\nmake sure that the code respects the code style. This may sound like an\noptional tool, or at least a “nice-to-have” but it really helps to keep\nconsistent style over your project.\n\n\nThis linter is not part of Go *per se*, so you need to grab it and install\nit by hand (see [official doc](https://github.com/golang/lint)).\n\n\nThe usage is fairly simple: you just run it on the packages of your code\n(you can also point the `.go` files):\n\n\n```bash\n\n$ golint -set_exit_status $(go list ./... | grep -v /vendor/)\n\n```\n\n\nNote the `-set_exit_status` option. By default, `golint` only prints the\nstyle issues, and returns (with a 0 return code), so the CI never considers\nsomething went wrong. If you specify the `-set_exit_status`, the return code\nfrom `golint` will be different from 0 if any style issue is encountered.\n\n\n#### Unit test\n\n\nThese are the most common tests you can run on your code. For each `.go`\nfile, we need to have an associated `_test.go` file holding the unit tests.\nYou can run the tests for all the packages with the following command:\n\n\n```bash\n\n$ go test -short $(go list ./... | grep -v /vendor/)\n\n```\n\n\n#### Data race\n\n\nThis is usually a hard subject to cover, but the `Go` tool has it by default\n(but only available on `linux/amd64`, `freebsd/amd64`, `darwin/amd64` and\n`windows/amd64`). For more information about data race, see [this\narticle](https://golang.org/doc/articles/race_detector.html). Meanwhile,\nhere is how to run it:\n\n\n```bash\n\n$ go test -race -short $(go list ./... 
| grep -v /vendor/)\n\n```\n\n\n#### Memory sanitizer\n\n\nClang has a nice detector for uninitialized reads called\n[MemorySanitizer](https://clang.llvm.org/docs/MemorySanitizer.html). The `go\ntest` tool is kind enough to interact with this Clang module (as soon as you\nare on `linux/amd64` host and using a recent version of Clang/LLVM\n(`>=3.8.0`). This command is how to run it:\n\n\n```bash\n\n$ go test -msan -short $(go list ./... | grep -v /vendor/)\n\n```\n\n\n#### Code coverage\n\n\nThis is also a must have to evaluate the health of your code, and see what\nthe part of code is under unit tests and what part is not. [Rob\nPike](https://twitter.com/rob_pike) wrote a [full post on that very\nsubject](https://blog.golang.org/cover).\n\n\nTo calculate the code coverage ratio, we need to run the following script:\n\n\n```bash\n\n$ PKG_LIST=$(go list ./... | grep -v /vendor/)\n\n$ for package in ${PKG_LIST}; do\n    go test -covermode=count -coverprofile \"cover/${package##*/}.cov\" \"$package\" ;\ndone\n\n$ tail -q -n +2 cover/*.cov >> cover/coverage.cov\n\n$ go tool cover -func=cover/coverage.cov\n\n```\n\n\nIf we want to get the coverage report in HTML format, we need to add the\nfollowing command:\n\n\n```bash\n\n$ go tool cover -html=cover/coverage.cov -o coverage.html\n\n```\n\n\n#### Build\n\n\nLast but not least, once the code has been fully tested, we might want to\ncompile it to make sure we can build a working binary.\n\n\n```bash\n\n$ go build -i -v gitlab.com/pantomath-io/demo-tools\n\n```\n\n\n### Makefile\n\n\n*git tag:*\n[init-makefile](https://gitlab.com/pantomath-io/demo-tools/tags/init-makefile)\n\n\n![](https://cdn-images-1.medium.com/max/1600/1*Ip_q_6I-kNpUjuPMOutuTA.jpeg)\n\n*\u003Csmall>Photo by [Matt\nArtz](https://unsplash.com/photos/qJE5Svhs2ek?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\u003C/small>*\n\n\nNow 
we have all the tools that we may use in the context of continuous\nintegration, we can wrap them all in a\n[Makefile](https://gitlab.com/pantomath-io/demo-tools/blob/init-makefile/Makefile),\nand have a consistent way to call them.\n\n\nThe purpose of this doc is not to present `make`, but you can refer to\n[official documentation](https://www.gnu.org/software/make/manual/make.html)\nto learn more about it.\n\n    PROJECT_NAME := \"demo-tools\"\n    PKG := \"gitlab.com/pantomath-io/$(PROJECT_NAME)\"\n    PKG_LIST := $(shell go list ${PKG}/... | grep -v /vendor/)\n    GO_FILES := $(shell find . -name '*.go' | grep -v /vendor/ | grep -v _test.go)\n\n    .PHONY: all dep build clean test coverage coverhtml lint\n\n    all: build\n\n    lint: ## Lint the files\n      @golint -set_exit_status ${PKG_LIST}\n\n    test: ## Run unittests\n      @go test -short ${PKG_LIST}\n\n    race: dep ## Run data race detector\n      @go test -race -short ${PKG_LIST}\n\n    msan: dep ## Run memory sanitizer\n      @go test -msan -short ${PKG_LIST}\n\n    coverage: ## Generate global code coverage report\n      ./tools/coverage.sh;\n\n    coverhtml: ## Generate global code coverage report in HTML\n      ./tools/coverage.sh html;\n\n    dep: ## Get the dependencies\n      @go get -v -d ./...\n\n    build: dep ## Build the binary file\n      @go build -i -v $(PKG)\n\n    clean: ## Remove previous build\n      @rm -f $(PROJECT_NAME)\n\n    help: ## Display this help screen\n      @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = \":.*?## \"}; {printf \"\\033[36m%-30s\\033[0m %s\\n\", $$1, $$2}'\n\nWhat do we have now? One target for any tool previously presented, and three\nmore targets for:\n\n\n* installation of dependencies (`dep`);\n\n* housekeeping of the project (`clean`);\n\n* some nice and shiny help (`help`).\n\n\nNote that we also had to create a script for the code coverage work. This is\nbecause implementing loops over files in a Makefile is a pain. 
So the work\nis done in a `bash` script, and the Makefile only triggers this script.\n\n\nYou can try the Makefile with the following commands:\n\n    $ make help\n    $ make lint\n    $ make coverage\n\n### Continuous integration\n\n\n*git tag:*\n[init-ci](https://gitlab.com/pantomath-io/demo-tools/tags/init-ci)\n\n\nNow the tools are in place, and we can run various tests on our code, we’d\nlike to automate these, on your repository. Luckily, GitLab offers [CI\npipelines](/solutions/continuous-integration/) just for this. And the setup\nfor this is pretty straightforward: all you create is a `.gitlab-ci.yml`\nfile at the root of the repository.\n\n\nThe [full documentation](https://docs.gitlab.com/ee/ci/yaml/) on this Yaml\nfile presents all the options, but you can start with this `.gitlab-ci.yml`:\n\n\n```yaml\n\nimage: golang:1.9\n\n\ncache:\n  paths:\n    - /apt-cache\n    - /go/src/github.com\n    - /go/src/golang.org\n    - /go/src/google.golang.org\n    - /go/src/gopkg.in\n\nstages:\n  - test\n  - build\n\nbefore_script:\n  - mkdir -p /go/src/gitlab.com/pantomath-io /go/src/_/builds\n  - cp -r $CI_PROJECT_DIR /go/src/gitlab.com/pantomath-io/pantomath\n  - ln -s /go/src/gitlab.com/pantomath-io /go/src/_/builds/pantomath-io\n  - make dep\n\nunit_tests:\n  stage: test\n  script:\n    - make test\n\nrace_detector:\n  stage: test\n  script:\n    - make race\n\nmemory_sanitizer:\n  stage: test\n  script:\n    - make msan\n\ncode_coverage:\n  stage: test\n  script:\n    - make coverage\n\ncode_coverage_report:\n  stage: test\n  script:\n    - make coverhtml\n  only:\n  - master\n\nlint_code:\n  stage: test\n  script:\n    - make lint\n\nbuild:\n  stage: build\n  script:\n    - make\n```\n\n\nIf you break down the file, here are some explanations on its content:\n\n\n* The first thing is to choose what Docker image will be used to run the CI.\nHead to the [Docker Hub](https://hub.docker.com/) to choose the right image\nfor your project.\n\n* Then, you specify some 
folders of this image [to be\ncached](https://docs.gitlab.com/ee/ci/yaml/#cache). The goal here is to\navoid downloading the same content several times. Once a job is completed,\nthe listed paths will be archived, and next job will use the same archive.\n\n* You define the different `stages` that will group your jobs. In our case,\nwe have two [stages](https://docs.gitlab.com/ee/ci/yaml/#stages) (to be\nprocessed in that order): `test` and `build`. We could have other stages,\nsuch as `deploy`.\n\n* The `before_script`\n[section](https://docs.gitlab.com/ee/ci/yaml/#before_script) defines the\ncommands to run in the Docker container right before the job is actually\ndone. In our context, the commands just copy or link the repository deployed\nin the `$GOPATH`, and install dependencies.\n\n* Then come the actual [jobs](https://docs.gitlab.com/ee/ci/jobs/), using\nthe `Makefile` targets. Note the special case for `code_coverage_report`\nwhere execution is restricted to the `master` branch (we don’t want to\nupdate the code coverage report from feature branches for instance).\n\n\nAs we commit/push the `.gitlab-ci.yml` file in the repository, the CI is\n[automatically\ntriggered](https://gitlab.com/pantomath-io/demo-tools/pipelines/13481935).\nAnd the pipeline fails. 
How come?\n\n\nThe `lint_code`\n[job](https://gitlab.com/pantomath-io/demo-tools/-/jobs/38690212) fails\nbecause it can’t find the `golint` binary:\n\n\n```bash\n\n$ make lint\n\nmake: golint: Command not found\n\nMakefile:11: recipe for target 'lint' failed\n\nmake: *** [lint] Error 127\n\n```\n\n\nSo,\n[update](https://gitlab.com/pantomath-io/demo-tools/commit/17a0206eb626504e559f56773e2d81c7b5808dbe)\nyour `Makefile` to install `golint` as part of the `dep` target.\n\n\nThe `memory_sanitizer`\n[job](https://gitlab.com/pantomath-io/demo-tools/-/jobs/38690209) fails\nbecause `gcc` complains:\n\n\n```bash\n\n$ make msan\n\n# runtime/cgo\n\ngcc: error: unrecognized argument to -fsanitize= option: 'memory'\n\nMakefile:20: recipe for target 'msan' failed\n\nmake: *** [msan] Error 2\n\n```\n\n\nBut remember we need to use Clang/LLVM `>=3.8.0` to enjoy the `-msan` option\nin `go test` command.\n\n\nWe have two options here:\n\n\n* either we set up Clang in the job (using `before_script`);\n\n* or we use a Docker image with Clang installed by default.\n\n\nThe first option is nice, but that implies to have this setup done **for\nevery single job**. This is going to be so long, we should do it once and\nfor all. 
So we prefer the second option, which is a good way to play with\n[GitLab\nRegistry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html).\n\n\n*git tag:*\n[use-own-docker](https://gitlab.com/pantomath-io/demo-tools/tags/use-own-docker)\n\n\nWe need to create a\n[Dockerfile](https://gitlab.com/pantomath-io/demo-tools/blob/use-own-docker/Dockerfile)\nfor the container (as usual: read the [official\ndocumentation](https://docs.docker.com/engine/reference/builder) for more\noptions about it):\n\n    # Base image:\n    FROM golang:1.9\n    MAINTAINER Julien Andrieux \u003Cjulien@pantomath.io>\n\n    # Install golint\n    ENV GOPATH /go\n    ENV PATH ${GOPATH}/bin:$PATH\n    RUN go get -u github.com/golang/lint/golint\n\n    # Add apt key for LLVM repository\n    RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -\n\n    # Add LLVM apt repository\n    RUN echo \"deb http://apt.llvm.org/stretch/ llvm-toolchain-stretch-5.0 main\" | tee -a /etc/apt/sources.list\n\n    # Install clang from LLVM repository\n    RUN apt-get update && apt-get install -y --no-install-recommends \\\n        clang-5.0 \\\n        && apt-get clean \\\n        && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*\n\n    # Set Clang as default CC\n    ENV set_clang /etc/profile.d/set-clang-cc.sh\n    RUN echo \"export CC=clang-5.0\" | tee -a ${set_clang} && chmod a+x ${set_clang}\n\nThe container built out of this Dockerfile will be based on\n[golang:1.9](https://hub.docker.com/_/golang/) image (the one referenced in\nthe `.gitlab-ci.yml` file).\n\n\nWhile we’re at it, we install `golint` in the container, so we have it\navailable. 
Then we follow [official way](http://apt.llvm.org/) of installing\nClang 5.0 from LLVM repository.\n\n\nNow we have the Dockerfile in place, we need to build the container image\nand make it available for GitLab:\n\n\n```bash\n\n$ docker login registry.gitlab.com\n\n$ docker build -t registry.gitlab.com/pantomath-io/demo-tools .\n\n$ docker push registry.gitlab.com/pantomath-io/demo-tools\n\n```\n\n\nThe first command connects you to the GitLab Registry. Then you build the\ncontainer image described in the Dockerfile. And finally, you push it to the\nGitLab Registry.\n\n\nTake a look at the [Registry for your\nrepository](https://gitlab.com/pantomath-io/demo-tools/container_registry),\nyou’ll see your image, ready to be used. And to have the CI using your\nimage, you just need to update the `.gitlab-ci.yml` file:\n\n    image: golang:1.9\n\nbecomes\n\n    image: registry.gitlab.com/pantomath-io/demo-tools:latest\n\nOne last detail: you need to tell the CI to use the proper compiler (i.e.\nthe `CC` environment variable), so we add the variable initialization in the\n`.gitlab-ci.yml` file:\n\n    export CC=clang-5.0\n\nOnce the modification are done, next commit will trigger the pipeline, which\nnow works:\n\n\n[gitlab.com/pantomath-io/demo-tools/pipelines/13497136](https://gitlab.com/pantomath-io/demo-tools/pipelines/13497136)\n\n\n### Badges\n\n\n*git tag:*\n[init-badges](https://gitlab.com/pantomath-io/demo-tools/tags/init-badges)\n\n\n![](https://cdn-images-1.medium.com/max/1600/1*0pY_6oCiHZ_eLh0vfg5rDA.jpeg)\n\n\n*\u003Csmall>Photo by [Jakob\nOwens](https://unsplash.com/photos/ZBadHaTUkP0?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\u003C/small>*\n\n\nNow the tools are in place, every commit will launch a test suite, and you\nprobably want to show it, and that’s legitimate :) The best way to do so is\nto use badges, and the best place 
for it is the `README`\n[file](https://gitlab.com/pantomath-io/demo-tools/blob/init-badges/README.md).\n\n\nEdit it and add the four following badges:\n\n\n* Build Status: the status of the last pipeline on the `master` branch:\n\n\n```\n\n[![Build\nStatus](https://gitlab.com/pantomath-io/demo-tools/badges/master/build.svg)](https://gitlab.com/pantomath-io/demo-tools/commits/master)\n\n```\n\n\n* Coverage Report: the percentage of source code covered by tests\n\n\n```\n\n[![Coverage\nReport](https://gitlab.com/pantomath-io/demo-tools/badges/master/coverage.svg)](https://gitlab.com/pantomath-io/demo-tools/commits/master)\n\n```\n\n\n* Go Report Card:\n\n\n```\n\n[![Go Report\nCard](https://goreportcard.com/badge/gitlab.com/pantomath-io/demo-tools)](https://goreportcard.com/report/gitlab.com/pantomath-io/demo-tools)\n\n```\n\n\n* License:\n\n\n```\n\n[![License\nMIT](https://img.shields.io/badge/License-MIT-brightgreen.svg)](https://img.shields.io/badge/License-MIT-brightgreen.svg)\n\n```\n\n\nThe coverage report needs a special configuration. You need to tell GitLab\nhow to get that information, considering that there is a job in the CI that\n*displays* it when it runs.\u003Cbr> There is a\n[configuration](https://gitlab.com/help/user/project/pipelines/settings#test-coverage-parsing)\nto provide GitLab with a regexp, used in any job’ output. If the regexp\nmatches, GitLab consider the match to be the code coverage result.\n\n\nSo head to `Settings > CI/CD` in your repository, scroll down to the `Test\ncoverage parsing` setting in the `General pipelines settings` section, and\nuse the following regexp:\n\n    total:\\s+\\(statements\\)\\s+(\\d+.\\d+\\%)\n\nYou’re all set! Head to the [overview of your\nrepository](https://gitlab.com/pantomath-io/demo-tools/tree/init-badges),\nand look at your `README`:\n\n\n### Conclusion\n\n\nWhat’s next? Probably more tests in your CI. 
You can also look at the CD\n([Continuous\nDeployment](https://docs.gitlab.com/ee/ci/environments/index.html)) to\nautomate the deployment of your builds. The documentation can be done using\n[GoDoc](https://godoc.org/-/about). Note that you generate a coverage report\nwith the `code_coverage_report`, but don’t use it in the CI. You can make\nthe job copy the HTML file to a web server, using `scp` (see this\n[documentation](https://docs.gitlab.com/ee/ci/ssh_keys/) on how to use SSH\nkeys).\n\n\nMany thanks to [Charles Francoise](https://dev.to/loderunner) who co-wrote\nthis paper and\n[gitlab.com/pantomath-io/demo-tools](https://gitlab.com/pantomath-io/demo-tools).\n\n\n## About the Guest Author\n\n\nJulien Andrieux is currently working on Pantomath. Pantomath is a modern,\nopen source monitoring solution, built for performance, that bridges the\ngaps across all levels of your company. The wellbeing of your infrastructure\nis everyone’s business. [Keep up with the project](http://goo.gl/tcxtXq).\n\n *[Go tools & GitLab - how to do Continuous Integration like a boss](https://medium.com/pantomath/go-tools-gitlab-how-to-do-continuous-integration-like-a-boss-941a3a9ad0b6) was originally published on Medium.*\n\n*Cover photo by [Todd\nQuackenbush](https://unsplash.com/photos/IClZBVw5W5A?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)*\n\n{: .note}\n",[109,9,1165],"tutorial",{"slug":1167,"featured":6,"template":700},"go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss","content:en-us:blog:go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss.yml","Go Tools And Gitlab How To Do Continuous Integration Like A 
Boss","en-us/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss.yml","en-us/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss",{"_path":1173,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1174,"content":1180,"config":1185,"_id":1187,"_type":14,"title":1188,"_source":16,"_file":1189,"_stem":1190,"_extension":19},"/en-us/blog/goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies",{"title":1175,"description":1176,"ogTitle":1175,"ogDescription":1176,"noIndex":6,"ogImage":1177,"ogUrl":1178,"ogSiteName":686,"ogType":687,"canonicalUrls":1178,"schema":1179},"Goldman Sachs partners with GitLab for next-gen platform strategies","Goldman Sachs’ George Grant shares how partnering with GitLab has modernized the development ecosystem.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671845/Blog/Hero%20Images/serverless-ops-blog.jpg","https://about.gitlab.com/blog/goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Goldman Sachs partners with GitLab for next-gen platform strategies\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein Matturro\"}],\n        \"datePublished\": \"2020-01-24\",\n      }",{"title":1175,"description":1176,"authors":1181,"heroImage":1177,"date":1182,"body":1183,"category":804,"tags":1184},[894],"2020-01-24","\n\nMost people know Goldman Sachs as the global investment banking giant, but over the past few years the company has branched out to some pretty modern applications that go beyond the standard financial firm. 
At GitLab Commit Brooklyn 2019, [George Grant](https://www.linkedin.com/in/george-grant-21a9624), who runs the US SDLC engineering team at Goldman Sachs, explained how they’ve partnered with GitLab to help transform not only their development but the company as a whole.\n\n“It means we have to be a lot more nimble than we were in the past,” Grant says. “Now that we’re developing things that run on people’s iPhones, you need to have a different sort of infrastructure to do that.” The SDLC engineering team drives strategies for the development team, including legacy products, but also newer platforms like budgeting applications and the latest Apple credit card. The team is at the center of every business move within the organization.\n\n## Getting past the “dark times”\nGolman Sachs has about 10 [SDLCs running](/platform/), having grown organically into its own ecosystem over the years for various purposes. “Many of the things that we have at GS were designed in house – its our own workflow, our own tools doing code reviews, surrounding a minimum amount of external tools. Everthing thats involved in it is very tightly coupled with everything else,” Grant says.\n\nThe deployments, the issue tracker, the builds, and the testing are all linked together in order for everything to be controlled in one environment, including regulatory and compliance. This workflow is comfortable and controlled for users, but not ideal. “The problem is, it is sort of simultaneously its greatest strength and greatest weakness because the tightness of the coupling of the components makes it very difficult to replace any of the ones,” Grant says. If any part of the environment needs to be updated or switched out, it impacts all the others.\n\n\n\nThe engineering team started researching a new strategic direction, primarily looking for a modern Git-based solution. 
The goal was to find a tool that could alleviate developers’ SDLC workload and provide critical strategies for [cloud and Kubernetes](/2017/11/30/containers-kubernetes-basics/), allowing people to move away from the legacy stack. “You actually want to have something that gives you the freedom to innovate, but still have that control level around it.”\n\n## Creating a roadmap with GitLab\nGoldman Sachs chose GitLab as a way to move to the cloud, as an automation tool and to ultimately become the center of the ecosystem. “We didn’t want GitLab to be an island,” Grant says. Within the first two weeks of introducing GitLab, there were over 1600 users, underscoring the push for a new strategic platform.\n\nGitLab users can be innovative without restrictions. Each user group continues to work in their own world of tooling, but in a highly regulated environment. Reduced cycle times are another benefit, according to Grant. “We have one team that used to only be able to do a release every two weeks. Now they can do one and do another one five minutes later if they want to,” he says.\n\nFor an experienced company, the ability to integrate with legacy tools is important. On top of that, GS is embracing DevOps and QA metrics now that they have end-to-end visibility within the ecosystem. The transparency of GitLab allows Goldman Sachs to have input. “We have new ideas and new ways that we want to use the product to drive it strategically within GS,” Grant says.\n\n## Goldman Sachs and GitLab: Better together\nGoldman Sachs and GitLab have established a partnership. “The proof is in the pudding, as they say, and Goldman Sachs was very, very happy to become an investor in GitLab,” Grant says. As users of the tool, Goldman Sachs found it to be a natural investment opportunity. Bottom line, he says, people are demanding to use it more often. 
“We believe it is the strategic platform to take us into the future.”\n\nTo learn more about Goldman Sach’s implementation strategies, watch George Grant’s presentation from GitLab Commit Brooklyn 2019.\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Bu3nrxPy1-E\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nPhoto by [Tomasz Frankowski](https://unsplash.com/@sunlifter?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,109,830,743,269],{"slug":1186,"featured":6,"template":700},"goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies","content:en-us:blog:goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies.yml","Goldman Sachs Partners With Gitlab For Next Gen Platform Strategies","en-us/blog/goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies.yml","en-us/blog/goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies",{"_path":1192,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1193,"content":1199,"config":1205,"_id":1207,"_type":14,"title":1208,"_source":16,"_file":1209,"_stem":1210,"_extension":19},"/en-us/blog/how-indeed-transformed-its-ci-platform-with-gitlab",{"title":1194,"description":1195,"ogTitle":1194,"ogDescription":1195,"noIndex":6,"ogImage":1196,"ogUrl":1197,"ogSiteName":686,"ogType":687,"canonicalUrls":1197,"schema":1198},"How Indeed transformed its CI platform with GitLab","The world's #1 job site migrated thousands of projects to GitLab CI, boosting productivity and cutting costs. 
Learn the benefits they realized, including a 79% increase in daily pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099351/Blog/Hero%20Images/Blog/Hero%20Images/Indeed-blog-cover-image-2_4AgA1DkWLtHwBlFGvMffbC_1750099350771.png","https://about.gitlab.com/blog/how-indeed-transformed-its-ci-platform-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Indeed transformed its CI platform with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Carl Myers\"}],\n        \"datePublished\": \"2024-08-27\",\n      }",{"title":1194,"description":1195,"authors":1200,"heroImage":1196,"date":1202,"body":1203,"category":828,"tags":1204},[1201],"Carl Myers","2024-08-27","***Editor's note: From time to time, we invite members of our customer community to contribute to the GitLab Blog. Thanks to Carl Myers, Manager of CI Platforms at Indeed, for sharing your experience with GitLab.***\n\nHere at Indeed, our mission is to help people get jobs. Indeed is the [#1 job site](https://www.indeed.com/about?isid=press_us&ikw=press_us_press%2Freleases%2Faward-winning-actress-viola-davis-to-keynote-indeed-futureworks-2023_textlink_https%3A%2F%2Fwww.indeed.com%2Fabout) in the world with more than 350 million unique visitors every month.\n\nFor Indeed's Engineering Platform teams, we have a slightly different motto: \"We help people to help people get jobs.\" As part of a data-driven engineering culture that has spent the better part of two decades always putting the job seeker first, we are responsible for building the tools that not only make this possible, but empower engineers to deliver positive outcomes to job seekers every day.\n\nGitLab Continuous Integration has allowed Indeed’s CI Platform team of just 11 people to effectively support thousands of users across the company. 
Other benefits Indeed has realized by moving to GitLab CI include:\n- 79% increase in daily pipelines\n- 10-20% lower CI hardware costs\n- Decreased support burden\n\n## Evolving our CI platform: From Jenkins to a scalable solution\n\nLike many large technology companies, we built our CI platform organically as the company scaled, using the de facto open source and industry standard solutions available at the time. Back in 2007, when Indeed had fewer than 20 engineers, we were using Hudson, Jenkins’ direct predecessor.\n\nToday, through nearly two decades of growth, we have thousands of engineers. As new technology became available, we made incremental improvements, switching to Jenkins around 2011. Another improvement allowed us to move most of our workloads to dynamic cloud worker nodes using [AWS EC2](https://aws.amazon.com/ec2/). As we entered the Kubernetes age, however, the system architecture reached its limits.\n\nJenkins’ architecture was not created with the cloud in mind. Jenkins operates by having a \"controller\" node, a single point of failure that runs critical parts of a pipeline and farms out certain steps to worker nodes (which can scale horizontally to some extent). Controllers are also a manual scaling axis.\n\nIf you have too many jobs to fit on one controller, you must partition your jobs across controllers manually. CloudBees offers ways to mitigate this, including the CloudBees Jenkins Operations Center, which allows you to manage your constellation of controllers from a single centralized place. However, controllers remain challenging to run in a Kubernetes environment because each controller is a fragile single point of failure. Activities like node rollouts or hardware failures cause downtime.\n\nIn addition to the technical limitations baked into Jenkins itself, our CI platform also had several problems of our own making. For example, we used the Groovy Jenkins DSL to generate jobs from code in each repository. 
This led to each project having its own copy-pasted job pipeline, resulting in hundreds of versions that were hard to maintain and update. While Indeed’s engineering culture values flexibility and allows teams to operate in separate repositories, this flexibility became a burden as teams spent too much time addressing regular maintenance requests.\n\nRecognizing our technical debt, we turned to the [Golden Path pattern](https://tag-app-delivery.cncf.io/whitepapers/platforms/), which allows flexibility while providing a default route to simplify updates and encourage consistent practices across projects.\n\nThe CI Platform team at Indeed is not very large. Our team of around 11 engineers supports thousands of users, fielding support requests, performing upgrades and maintenance, and enabling always-on support for our global company.\n\nBecause our team not only supports our GitLab instance but also the entire CI platform, including the artifact server, our shared build code, and multiple other custom components of our platform, we had our work cut out for us. We needed a plan that would help us address our challenges while making the most efficient use of our existing resources.\n\n## Moving to GitLab CI\n\nAfter a careful design review with key stakeholders, we decided to migrate the entire company from Jenkins to GitLab CI. 
The primary reasons for choosing GitLab CI were:\n- We were already using GitLab for source code management.\n- GitLab is a complete offering that provides everything we need for CI.\n- GitLab CI is designed for scalability and the cloud.\n- GitLab CI enables us to write templates that extend other templates, which is compatible with our golden path strategy.\n- GitLab is open source software and the GitLab team has always been supportive in helping us submit fixes, giving us extra flexibility and reassurance.\n\nBy the time we officially announced that the GitLab CI Platform would be generally available to users, we already had 23% of all builds happening in GitLab CI from a combination of grassroots efforts and early adopters.\n\nThe challenge of the migration, however, would be the long tail. Due to the number of custom builds in Jenkins, an automated migration tool would not work for the majority of teams. Most of the benefits of the new system would not come until the old system was at 0%. Only then could we turn off the hardware and save the CloudBees license fee.\n\n## Feature parity and the benefits of starting over\n\nThough we support many different technologies at Indeed, the three most common languages are Java, Python, and JavaScript. These language stacks are used to make libraries, deployables (web services or applications), and cron jobs (a process that runs at regular intervals, for example, to build a data set in our data lake). Each of these formed a matrix of project types (Java Library, Python Cronjob, JavaScript Webapp, etc.) for which we had a skeleton in Jenkins. 
Therefore, we had to produce a golden path template in GitLab CI for each of these project types.\n\nMost users could use these recommended paths without change, but for those who did require customization, the golden path would still be a valuable starting point and enable them to change only what they needed, while still benefiting from centralized template updates in the future.\n\nWe quickly realized that most users, even those with customizations, were happy to take the golden path and at least try it. If they missed their customizations, they could always add them later. This was a surprising result! We thought that teams who had invested in significant customization would be loath to give them up, but in the majority of cases teams just didn't care about them anymore. This allowed us to migrate many projects very quickly — we could just drop the golden path (a small file about 6 lines long with includes) into their project, and they could take it from there.\n\n## InnerSource to the rescue\n\nThe CI Platform team also adopted a policy of \"external contributions first\" to encourage everyone in the company to participate. This is sometimes called InnerSource. We wrote tests and documentation to enable external contributions — contributions from outside our immediate team — so teams that wanted to write customizations could instead include them in the golden path behind a feature flag. This let them share their work with others and ensure we didn't break them moving forward (because they became part of our codebase, not theirs).\n\nThis also had the benefit that particular teams who were blocked waiting for a feature they needed were empowered to work on the feature themselves. We could say \"we plan to implement the feature in a few weeks, but if you need it earlier than that we are happy to accept a contribution.\" In the end, many core features necessary for parity were developed in this manner, more quickly and better than our team had resources to do it. 
The migration would not have been a success without this model.\n\n## Ahead of schedule and under budget\n\nOur CloudBees license expired on April 1, 2024. This gave us an aggressive target to achieve the full migration. This was particularly ambitious considering that at the time, 80% of all builds (60% of all projects) still used Jenkins for their CI. This meant over 2,000 [Jenkinsfiles](https://www.jenkins.io/doc/book/pipeline/jenkinsfile/) would still need to be rewritten or replaced with our golden path templates.\n\nTo achieve this target, we made documentation and examples available, implemented features where possible, and helped our users contribute features where they were able.\n\nWe started regular office hours, where anyone could come and ask questions or seek our help to migrate. We additionally prioritized support questions relating to migration ahead of almost everything else. Our team became GitLab CI experts and shared that expertise inside our team and across the organization.\n\nAutomatic migration for most projects was not possible, but we discovered it could work for a small subset of projects where customization was rare. We created a Sourcegraph batch change campaign to submit merge requests to migrate hundreds of projects, and poked and prodded our users to accept these MRs.\n\nWe took success stories from our users and shared them widely. As users contributed new features to our golden paths, we advertised that these features \"came free\" when you migrated to GitLab CI. Some examples included built-in security and compliance scanning, Slack notifications for CI builds, and integrations with other internal systems.\n\nWe also conducted a campaign of aggressive \"scream tests.\" We automatically disabled Jenkins jobs that hadn't run or succeeded in a while, and told users that if they needed them, they could turn them back on. This was a low-friction way to identify which jobs were actually needed. 
We had thousands of jobs that hadn't been run a single time since our last CI migration (which was Jenkins to Jenkins). This told us we could safely ignore almost all of them.\n\nIn January 2024, we nudged our users by announcing that all Jenkins controllers would become read-only (no builds) unless an exception was explicitly requested. We had much better ownership information for controllers and they generally aligned with our organization's structure, so it made sense to focus on controllers rather than jobs. The list of controllers was also a much more manageable list than the list of jobs.\n\nTo obtain an exception, we asked our users to find their controllers in a spreadsheet and put their contact information next to each one. This enabled us to get a guaranteed up-to-date list of stakeholders we could follow up with as we sprinted to the finish line, but also enabled users to clearly let us know which jobs they absolutely needed. At peak, we had about 400 controllers; by January we had 220, but only 54 controllers required exceptions (several of them owned by us, to run our tests and canaries).\n\n![Indeed - Jenkins Controller Count graph](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099357/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099357392.png)\n\nWe had a manageable list of around 50 teams we divided among our team and started doing outreach to understand how each team was progressing with the migration. We spent January and February discovering that some teams planned to finish their migration without our help before February 28 others were planning to deprecate their projects before then, and a very small number were very worried they wouldn't make it.\n\nWe were able to work with this smaller set of teams and provide them with “white-glove” service. We still explained that while we lacked the expertise necessary to do the migration for them, we could partner with a subject matter expert from their team. 
For some projects, we wrote and they reviewed; for others, they wrote and we reviewed. In the end, all of our work paid off and we turned off Jenkins on the very day we had announced 8 months earlier.\n\n## The results: Enhanced CI efficiency and user satisfaction\n\nAt its peak, our Jenkins CI platform ran over 14,000 pipelines per day and serviced our thousands of projects. Today, our GitLab CI platform has run over 40,000 pipelines in a single day and regularly runs over 25,000 per day. The incremental cost of each job of each pipeline is similar to Jenkins, but without the overhead of hardware to run the controllers. Additionally, these controllers served as single points of failure and scaling limiters that forced us to artificially divide our platform into segments. While an apples-to-apples comparison is difficult, we find that with this overhead gone our CI hardware costs are 10-20% lower. Additionally, the support burden of GitLab CI is lower since the application automatically scales in the cloud, has cross-availability-zone resiliency, and the templating language has excellent public documentation available.\n\nA benefit just as important, if not moreso, is that now we are at over 70% adoption of our golden paths. This means that we can roll out an improvement and over 5,000 projects at Indeed will benefit immediately with no action required on their part. This has enabled us to move some jobs to more cost-effective ARM64 instances, keep users' build images updated more easily, and better manage other cost saving opportunities. Most importantly, our users are happier with the new platform.\n\n__About the author:__\n*Carl Myers lives in Sacramento, CA, and is the manager of the CI Platform team at Indeed. 
Carl has spent his nearly two-decade career dedicated to building internal tools and developer platforms that delight and empower engineers at companies large and small.*\n\n**Acknowledgements:**\n*This migration would not have been possible without the tireless efforts of Tron Nedelea, Eddie Huang, Vivek Nynaru, Carlos Gonzalez, Lane Van Elderen, and the rest of the CI Platform team. The team also especially appreciates the leadership of Deepak Bitragunta, and Irina Tyree for helping secure buy-in, resources and company wide alignment throughout this long project. Finally, our thanks go out to everyone across Indeed who contributed code, feedback, bug reports, and helped migrate projects.*\n\n**This is an edited version of the article [How Indeed Replaced Its CI Platform with Gitlab CI](https://engineering.indeedblog.com/blog/2024/08/indeed-gitlab-ci-migration/), originally published on the Indeed engineering blog.**",[833,109,9,496],{"slug":1206,"featured":91,"template":700},"how-indeed-transformed-its-ci-platform-with-gitlab","content:en-us:blog:how-indeed-transformed-its-ci-platform-with-gitlab.yml","How Indeed Transformed Its Ci Platform With Gitlab","en-us/blog/how-indeed-transformed-its-ci-platform-with-gitlab.yml","en-us/blog/how-indeed-transformed-its-ci-platform-with-gitlab",{"_path":1212,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1213,"content":1219,"config":1226,"_id":1228,"_type":14,"title":1229,"_source":16,"_file":1230,"_stem":1231,"_extension":19},"/en-us/blog/how-startups-build-it-infrastructure",{"title":1214,"description":1215,"ogTitle":1214,"ogDescription":1215,"noIndex":6,"ogImage":1216,"ogUrl":1217,"ogSiteName":686,"ogType":687,"canonicalUrls":1217,"schema":1218},"A way for startups to build a solid IT infrastructure","Seven free software solutions to cover your most important use 
cases.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679216/Blog/Hero%20Images/startups-it-infrastructure.jpg","https://about.gitlab.com/blog/how-startups-build-it-infrastructure","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A way for startups to build a solid IT infrastructure\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"plapadoo\"}],\n        \"datePublished\": \"2017-08-07\",\n      }",{"title":1214,"description":1215,"authors":1220,"heroImage":1216,"date":1222,"body":1223,"category":694,"tags":1224},[1221],"plapadoo","2017-08-07","\n\n *plapadoo is a software startup from Hannover, Germany, providing tailored, high-quality software engineering to their clients. They fill us in on how they chose solutions for their IT infrastructure, including communication, backups, [CI/CD](/topics/ci-cd/) and more.*\n\n\u003C!-- more -->\n\nWe recently founded [our company](https://plapadoo.de/) and so one of the first things to do was to get our infrastructure up and running. As a software startup, our technical infrastructure is the heart of our company. It influences our productivity, has impact on our costs and offers a great chance to set us apart from the competition. Having a good infrastructure is also key to saving us money and increasing development speed.\n\nWhen planning the setup of our infrastructure, we kept two things in mind: First, we wanted to have open source software running wherever possible, and second, we wanted to use strong encryption for both communication and data storage. Also, we prefer lightweight software with few dependencies. 
Below, you find a small list of important use cases and which software we use to cover them:\n\n- [Chat](#chat) ([Matrix](https://matrix.org/)/[Riot](https://about.riot.im/) web app + Android app)\n- [Email](#email) (self-hosted [Dovecot](https://www.dovecot.org/) + [Postfix](http://www.postfix.org/) + [Sieve](http://sieve.info/) + [SpamAssasin](http://spamassassin.apache.org/))\n- [Calendar and Contacts](#calendar-and-contacts) ([Radicale](http://radicale.org/))\n- [Voice Conferencing](#voice-conferencing) ([uMurmur](http://umurmur.net/)/[Mumble](https://wiki.mumble.info/))\n- [Synchronization of files across multiple devices](#data-storage) ([Syncthing](https://syncthing.net/))\n- [Git and Continuous Integration](#build-and-continuous-integration) ([GitLab](/stages-devops-lifecycle/) & [GitLab CI](/solutions/continuous-integration/))\n- [Backup and Traceability](#backup-and-traceability) ([borgmatic](https://github.com/witten/borgmatic) & [etckeeper](http://etckeeper.branchable.com/))\nBesides this, we have other services (like VPN or HTTP servers) running which are not that special and as such, are not covered on this article.\n\n## Base setup\n\nIt all starts with choosing the platform to run your software on. We decided to use [Arch Linux](https://www.archlinux.org/) as the operating system for our server. Our main reasons for choosing Arch Linux were its active community, good documentation, highly up-to-date repositories with current versions of important software, good support for disk encryption, and finally, the fact that Arch Linux has a rolling update scheme instead of a release-based one. This last point is especially important to us, since we do not want to go through the pain of upgrading our operating system from one version to the next every other year -  which usually causes lots of trouble. Furthermore, release-based distributions tend to have outdated software in their repositories. 
Instead, we prefer to keep our system always up to date and enjoy the latest version of any software any time.\n\nMost of our software is installed using Arch Linux’ package manager. However, in some cases [Docker](https://docker.com/) is also a good idea to use for running software. This is especially the case when software introduces dependencies you don’t want on your host system or if you are in doubt about the security of a software. Since Docker provides a certain level of isolation, security breaches don’t have as bad consequences as they have when you are running the vulnerable software directly on your host system. However, it should be kept in mind that there is the risk of a so-called container breakout. This basically means that your host system can be subject to an attack even if the vulnerable software is running inside a Docker container. Other reasons for using Docker can be wanting to try something out without messing up your host system or maybe software is simply not available for your Linux distribution. Of course, there are many other advantages to containerization, but we won’t be covering those today.\n\n## Communication\n\nCommunication, and using appropriate communication channels has been central to us since the very beginning. We wanted a means of communicating that was secure, fast, reliable, and easily accessible from any device. This applies to chat, email, contacts and calendar entries.\n\n### Chat\n\nFor chatting, we needed a solution which supported the concept of a “room” or “channel,” so as to keep discussions clear and separated from each other. We found Matrix/Synapse and Riot to be a perfect solution. While we also tried alternatives, such as Rocket.Chat and Mattermost, we liked Riot/Matrix the most because of its native Android app, its active development, and an open API.\n\nWe are using the Matrix API to run custom chat bots. 
These bots have become quite an important factor in our company, since they massively increase transparency and information distribution among the team. For example, we have bots to inform us about new commits being pushed to our GitLab server, new calendar entries being created in our shared calendar, successful or failed builds and so on. We will cover these bots in detail in an upcoming article.\n\n### Email\n\nSince we want to have complete control over the data belonging to our core business, we use a private mail server. It is indeed challenging to set up securely, but we still decided to go with it because of how important secure and private communication is to us. We had to read a lot of documentation before we could set it up, most importantly to prevent a security hole in the system. Not doing that would possibly mean ending up on a spammer blacklist, since someone could be abusing our mail server, or an attacker gaining access to our mail. It is a lot of work, but we definitely recommend taking the time to understand every step of the process and avoid any mistakes. On the client side, we seek to encrypt our emails using PGP whenever possible.\n\n### Calendar and contacts\n\nIn order to have a shared calendar as well as a shared address book, we are running Radicale, which is a lightweight CalDAV and CardDAV server. Although it is not easy to configure, it comes with support for Git and just quietly does its job in the background. We have never experienced any problems with this software so far and like it for its reliability. For Android and iOS, there are CalDAV and CardDAV adapters available to synchronize everything with your phone.\n\n### Voice Conferencing\n\nFor voice conferencing, it was very important to us to have a trustworthy open source solution in place. Proprietary solutions always come at the risk of backdoors being shipped along with them. We decided to give Mumble a try. 
Mumble is an open source voice client that requires a central server to handle all the traffic. The official server implementation is called Murmur. When installing Murmur, we learned that it pulls in a giant bunch of dependencies.\n\nAmong those dependencies are things such as X11 which most people don’t want on their servers. The problem with such dependencies is that they introduce potential attack surfaces as well as costing time, money, and other resources to maintain and update them. So you normally want as few dependencies as possible. This alone would make it a bad fit for us, but we still decided to give it a try. One option would have been to run Murmur inside a Docker container where the mentioned dependencies wouldn’t bother us too much. While we were configuring Murmur, we had to choose a server password. As always, we generated a long, strong password with about 60 random characters (including special characters). As we started the server and tried to connect a client, we were completely shocked. Murmur let clients in without requiring a password.\n\nWe found out that Murmur seems to have a problem with long passwords and then just ignores them. So if you configure Murmur with the goal of strong security, you get no security at all. Needless to say that we immediately uninstalled Murmur and all of the crazy dependencies it introduced.\nWhile looking for alternatives, we soon discovered uMurmur which is an alternative Mumble server implementation aiming at embedded systems. It comes with few dependencies and generally seems to be well implemented. We installed it, did not experience any issues with long passwords and have been using it ever since without any problem. 
The communication is encrypted using a TLS certificate.\n\n## Data storage\n\n![box files](https://about.gitlab.com/images/blogimages/startups-it-infrastructure-body.jpg){: .shadow}\n\nAnother important aspect within a company besides communication is the need to store and distribute documents among its different stakeholders.\nWhen sharing data, most programmers will normally use Git. However, Git is not to best choice for sharing binary data such as documents, photos, videos, etc., because one usually doesn’t need to keep different versions of these files. A common approach is to use ownCloud/NextCloud for data sharing, but since we really don’t like PHP, we precluded these two applications.\n\nInstead, we discovered Syncthing. Once you understand the concept of Syncthing, it is easy to set up, extremely easy to use and it just works out of the box. Syncthing can be described as a software which synchronizes data across several nodes. We have one Syncthing instance running on our server that acts as a kind of master node, although a master is not explicitly needed -  Syncthing is completely decentralized. We also run Syncthing on our desktops and phones. Each Syncthing node has a unique ID, which has to be added using the web interface of the master node in order to share data with them. For the local node, the unique ID of the master node has to be added accordingly. Using this concept of a master node, we don’t have to wire all our devices to each other -  it is enough to just wire each device to the master node.\n\nAfter that, you can select which folders should be shared using Syncthing. Syncthing will then automatically upload any new data you put into these folders to the remote node. Data added by other users is downloaded to the clients on the fly, and deletions of files, changes, etc. are also applied locally. For Android, there is a native Syncthing app available which does exactly the same. 
By using Syncthing, all our devices always have the latest version of the data stored inside the Syncthing shares on the master node.\n\n## Build and continuous integration\n\nFor Git and continuous integration, we use GitLab, which already comes with integrated CI features. Although GitLab is quite resource-hungry, it provides lots of very nice features such as an integrated issue tracker and the “snippets” area -  where you can paste code snippets and share them. GitLab is well documented and has an open API. It features webhooks that you can use to trigger HTTP requests whenever commits are pushed, CI pipelines start, and so on. We use that to generate notifications in matrix rooms corresponding to the Git repositories. So, for example, if someone pushes a commit to project “foo,” we get a notification in a Matrix room “room about foo,” which is linked to this project.\n\n>GitLab provides lots of very nice features such as an integrated issue tracker and the “snippets” area -  where you can paste code snippets and share them\n\nWe are using the official GitLab Docker image, which already includes [Prometheus](https://prometheus.io/) for monitoring. We are accessing this Prometheus instance from our host system and plot its data in a dedicated [Grafana](https://grafana.com/) dashboard. This way, we can monitor our GitLab server internals with very little effort.\n\nFor building a project using GitLab CI, you need a so-called “gitlab-runner” that acts as a build agent. There are also official Docker images available for those runners, but we have created our own Docker base image, which has some basic tools we constantly need. We use our custom base image to build individual runners for each project on top of it. This way, we have runners tailored exactly to the needs of our projects. 
Since the Docker socket is mapped into our runners, we can even build and deploy Docker images from within them.\n\nWe like the fact that the build jobs are defined through a “.gitlab-ci.yml” file that is versioned with each project. This way, you can track changes to the build process and always have a running build - even if you checkout an old version of a project.\n\n## Backup and traceability\n\nBacking up your data is very important. Especially nowadays with the widespread use of SSDs, when fatal disk failure is likely to happen. Other reasons for data loss may be accidental deletion or attacks. We are using [BorgBackup](https://borgbackup.readthedocs.io/) together with borgmatic, which is a nice, simple, incremental, and highly automatable backup solution. You can easily specify files to exclude from the backup, and also select how many daily, weekly, monthly and yearly backups you want Borg to keep. By setting up a Cron job or systemd timer, you can fully automate the backup process. We create backups every night and store them on an NFS storage, which is only mounted when the backup process is running. This way, we avoid the backup to be deleted by an accidental `rm -rf /` or some other mishap. Borg encrypts the backups and supports compression to keep your backups safe and small. We like to keep track of any changes we make to the system, especially those to configuration files.\n\nFor Linux, there is a useful little tool called etckeeper, which turns your `/etc` directory into a Git repository. It also adds hooks to your package manager to automatically commit any configuration changes being performed during system updates. Using etckeeper, every configuration change corresponds to a Git commit, with an author, a timestamp and a message. This provides for much more transparency, especially when more than one person administrates a server. 
Also, the way Git works, accidental changes are detected and bad configurations can be easily reverted.\n\n## Summary\n\nWe explained that we, at plapadoo, prefer lightweight (in terms of dependencies), focused software over bloated solutions and favor open source software. Our custom chat bot gives us a high level of transparency and awareness, and also improves our productivity, since we always know what’s going on, even if working remotely. Lastly, we explained which software solutions we have chosen for which use cases and why.\n\nIf you liked this article, please help us reach more readers by sharing it. If you have any questions, thoughts or recommendations on the topic, feel free to comment. Which software solutions did you choose for your startup?\n\n_This post was originally published on [Medium](https://medium.com/plapadoo/a-way-for-startups-to-build-a-solid-it-infrastructure-a48b222fbff6/)._\n\n[CERN reception, Meyrin, Switzerland](https://unsplash.com/@samuelzeller?photo=JuFcQxgCXwA) by [Samuel Zeller](https://unsplash.com/@samuelzeller) on Unsplash.\n{: .note}\n",[697,1225,9],"startups",{"slug":1227,"featured":6,"template":700},"how-startups-build-it-infrastructure","content:en-us:blog:how-startups-build-it-infrastructure.yml","How Startups Build It Infrastructure","en-us/blog/how-startups-build-it-infrastructure.yml","en-us/blog/how-startups-build-it-infrastructure",{"_path":1233,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1234,"content":1240,"config":1248,"_id":1250,"_type":14,"title":1251,"_source":16,"_file":1252,"_stem":1253,"_extension":19},"/en-us/blog/how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design",{"title":1235,"description":1236,"ogTitle":1235,"ogDescription":1236,"noIndex":6,"ogImage":1237,"ogUrl":1238,"ogSiteName":686,"ogType":687,"canonicalUrls":1238,"schema":1239},"Fighting the opioid epidemic with ML & human-centered design","GitLab users Jack Cackler and Frank Lee explain how they use 
predictive analytics to empower community stakeholders, like first responders and policy makers, to save lives.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671593/Blog/Hero%20Images/telesphora-team.jpg","https://about.gitlab.com/blog/how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Telesphora is tackling the opioid epidemic with machine learning and human-centered design\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erica Lindberg\"}],\n        \"datePublished\": \"2018-09-05\",\n      }",{"title":1241,"description":1236,"authors":1242,"heroImage":1237,"date":1244,"body":1245,"category":804,"tags":1246},"How Telesphora is tackling the opioid epidemic with machine learning and human-centered design",[1243],"Erica Lindberg","2018-09-05","\n\nOn average, [115 Americans die every day](https://www.cdc.gov/drugoverdose/epidemic/index.html) from an opioid overdose. The team at [Telesphora](https://telesphora.com/) is on a mission to help health care professionals and local communities change that.\n\nIn 2017, the United States Department of Health and Human Services (HHS) declared the current opioid crisis a public health emergency as the number of [deaths involving opioids](https://www.drugabuse.gov/related-topics/trends-statistics/overdose-death-rates) in the United States skyrocketed from approximately 10,000 in 2002 to an estimated 49,000 in 2017.\nIn response to the crisis, the HHS released a [five-point strategy](https://www.hhs.gov/opioids/about-the-epidemic/hhs-response/index.html) for fighting the opioid epidemic. Among the key priorities you’d expect to see from a health crisis report (e.g. 
better prevention, treatment, and recovery services; better pain management) is **better data**, and they’ve turned to computer and data scientists for help.\n\n![US map of opioid epidemic](https://about.gitlab.com/images/blogimages/telesphora/us-map-crisis.jpg){: .medium.center}\n*\u003Csmall>In 2016, the number of overdose deaths involving opioids was 5 times higher than in 1999.\u003C/small>*\n\n## Designing for people\n\nJack Cackler is a machine learning specialist. Frank Lee is a pain management specialist. Under typical circumstances, these two may have never met. But when the HHS decided to hold an unprecedented [national opioid crisis code-a-thon](https://www.hhs.gov/challenges/code-a-thon/index.html), they didn’t just enlist developers – they brought in stakeholders from every side of the issue to develop data-driven solutions to combat the opioid epidemic across three tracks: treatment, usage, and prevention.\n\n[Origami Innovations](https://origamiinnovations.com/), a design, innovation, and solution lab powered by a Yale University students, was invited to the code-a-thon, bringing Cackler, Lee, and co-founders Matthew Erlendson, fourth-year medical student at Yale University and founder of Origami Innovations, and Dara Rouholiman, a digital health, data, and machine learning consultant together for the time. After winning the Treatment Track and receiving a $10,000 prize, they formed Telesphora, a human-centered data science platform.\n\n“One of the things that we were involved with was coming up with the core themes for the hackathon,” said Frank Lee, co-founder of Telesphora. “One of the ways that we do that is by human-centered design thinking.”\n\nHuman-centered design is an approach to design that considers the human perspective in every step of the problem-solving process. As Jack Cackler, co-founder at Telesphora, explains, “Sometimes, especially for those with a technical background, there’s a tendency to just focus on a technical solution. 
We really tried to get the story behind how this [opioid crisis] really impacted people.”\n\n> \"There’s a tendency to just focus on a technical solution. We really tried to get the story behind how this [opioid crisis] really impacted people.”\n\nCackler and team knew they wanted to design a human-centered solution. Discovering that the stigma of chronic opioid use was preventing treatment, they started asking questions:\n\n- *How might we treat this like a disease to reduce stigma, taking an empathetic approach similar to outbreaks of the flu or STDs?*\n- *How might we better predict community outbreaks?*\n- *How might we contain high mortality outbreaks, such as bad batches of drugs, to save lives in real time?*\n\n“We involved all the stakeholders in the crisis, which includes not only the providers, the scientists, and the administrators of the local and the state regions, but also the patients and families of patients who are affected by the overdose,” said Lee. “After doing a lot of brainstorming with these participants, we knew there needed to be better communication between first responders. We aimed our solution toward first responders and how they can help each other better allocate resources to help with the overdoses.”\n\n## Empathy over stigma\n\nOn June 23, 2016 in New Haven, Connecticut, where many on Cackler and Lee’s code-a-thon team called home, 12 patients, found within a one-block radius, were taken to Yale New Haven Hospital for opioid overdose. Three lost their lives due to a shortage of the drug Narcan (naloxone), a drug that can treat an opioid overdose to prevent death; the shelf life is short and the cost is high.\n\nPart of the problem, according to Lee and Cackler, is that there’s a common assumption that there’s a uniform distribution of overdoses, therefore, you can accommodate the demand. 
However, data analysis and conversations with first responders show that overdoses happen in spikes, like the event in New Haven.\n\n“There will be a new distribution channel of some opioid in some city. And then all of a sudden, you'll have a dozen, two dozen overdoses in a weekend, and there's just no way that the ambulances in the city can service that demand,” said Cackler. If the outbreak in New Haven could have been predicted, health agencies could have prepared and saved lives.\n\n![telesphora interface](https://about.gitlab.com/images/blogimages/telesphora/hhs1.png){: .medium.center}\n*\u003Csmall>Telesphora is a platform that uses real-time, open-access data and machine learning to predict where and when increases in opioid overdose and mortality will occur.\u003C/small>*\n\nThe solution Cackler, Lee, and the team came up with, now Telesphora, aimed to do just that. Using real-time data and future-trend data, they built a platform that empowers communities to predict outbreaks, increases access to treatment and resources, and reduces the stigma of opioid use.\n\n## Predictive analytics and user-friendly tools save lives\n\nKnowing that if an overdose outbreak is predicted before it happens, life-saving medicine can be allocated to the soon-to-be affected area to save lives, the Telesphora team used predictive analytics and user-friendly design to build a projection model and visualize the data.\n\n> \"If the outbreak in New Haven could have been predicted, health agencies could have prepared and saved lives.\"\n\nStarting with historical overdose data and network analysis of supply movements and overdoses, they created a spatiotemporal Poisson process to project future opioid overdose trends at any given space and time. 
The Poisson process takes real-time data and uses the geographic information, temporal information, and type of drug to predict the movement of opioids, alerting local responders and authorities of a potential overdose outbreak before it happens, bringing response time and mortality rate down.\n\n“The first alerts in this model come from neighboring cities in a flurry of mortality rate. Our tool with a geospatial analysis can predict the movement of spikes. When you see a spike in fentanyl in New Haven, CT, 4.8 days later you’ll see a spike happen in Fairfield,” Cackler explains.\n\n![machine learning explanation](https://about.gitlab.com/images/blogimages/telesphora/machine-learning.jpg){: .medium.center}\n*\u003Csmall>The machine learning model predicts the movement of outbreaks based on surrounding counties.\u003C/small>*\n\nWhen an outbreak is detected, it appears as a spike on the graph and the model can correlate that spike to different regions, alerting communities to how many days until that outbreak affects their area. The data visualization makes it easy for end users, like first responders, to digest the numbers and trends, showing the actual and predicted data across different regions, and the ability to filter by different drugs.\n\n“If we had this model a year before, events like what happened in New Haven could have been predicted. I think that’s really impactful and you can see in a tangible way how this is actionable,” said Cackler.\n\n*Are you using machine learning or human-centered design to build actionable solutions for the future? We want to hear from you! 
Email content@gitlab.com.*\n\nAll images courtesy of Telesphora\n{: .note}\n",[9,1247],"AI/ML",{"slug":1249,"featured":6,"template":700},"how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design","content:en-us:blog:how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design.yml","How Telesphora Is Tackling The Opioid Crisis Machine Learning Human Centered Design","en-us/blog/how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design.yml","en-us/blog/how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design",{"_path":1255,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1256,"content":1262,"config":1268,"_id":1270,"_type":14,"title":1271,"_source":16,"_file":1272,"_stem":1273,"_extension":19},"/en-us/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci",{"title":1257,"description":1258,"ogTitle":1257,"ogDescription":1258,"noIndex":6,"ogImage":1259,"ogUrl":1260,"ogSiteName":686,"ogType":687,"canonicalUrls":1260,"schema":1261},"How to automatically create a new MR on GitLab with GitLab CI","With this script, every time we push a commit, GitLab CI checks if the branch that commit belongs to already has an open MR and, if not, creates one.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679783/Blog/Hero%20Images/whats-next-for-gitlab-ci.jpg","https://about.gitlab.com/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automatically create a new MR on GitLab with GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Riccardo Padovani\"}],\n        \"datePublished\": \"2017-09-05\",\n      }",{"title":1257,"description":1258,"authors":1263,"heroImage":1259,"date":1265,"body":1266,"category":741,"tags":1267},[1264],"Riccardo Padovani","2017-09-05","At 
[fleetster](https://www.fleetster.net/), we have our own instance of\n[GitLab](https://gitlab.com/) and we rely a lot on [GitLab\nCI](/solutions/continuous-integration/). How could it be otherwise? We are a\nsmall team, with a lot of different projects (only in last month, we had\nmore than **13,000 commits** over **25 different projects**, and we are only\n10 people – with myself working part time). Automating as many development\nsteps as possible (from build to QA to deploy) is helping us a lot, but\nsometimes we write some code and then forget about it. This is a disaster!\nWe have some bug fix or some new feature ready, but it is forgotten in some\nbranch somewhere.\n\n\n\u003C!-- more -->\n\n\nThis is why we have a policy to push as soon as possible to open a new MR,\nmark it as WIP, and assign to ourselves; in this way GitLab will remind us\nwe have an MR.\n\n\nYou need to do three steps to achieve that:\n\n\n* Push the code\n\n* Click on the link that appears on your terminal\n\n* Fill a form\n\n\nBut we are nerds. We are lazy. So one night, after a couple of beers,\n[Alberto Urbano](https://www.linkedin.com/in/alberto-urbano-047a4b19/) and I\nspent some hours to automate a task that requires 10 seconds.\n\n\nActually, the experience was quite fun, it was the first time we used GitLab\nAPIs and we learned things we will apply to others scripts as well.\n\n\n![Image via Riccardo's\nblog](https://about.gitlab.com/images/blogimages/automating-tasks-expectation-versus-reality.png){:\n.shadow}\u003Cbr>\n\n*Image by Randall Munroe,\n[xkcd.com](https://imgs.xkcd.com/comics/automation.png)*\n\n\n### The script\n\n\nWith this script, every time we push a commit, GitLab CI checks if the\nbranch that commit belongs to already has an open MR and, if not, it creates\nit. 
It then assigns the MR to you, and puts **WIP** in the title to mark it\nas a work in progress.\n\n\nIn this way you cannot forget about that branch, and when you’ve finished\nwriting code on it, you just need to remove the WIP from the title and\nassign to the right person to review it.\n\n\nIn the end, this is the script we came out with (when you add to your\nproject, remember to make it executable):\n\n\n```\n\n#!/usr/bin/env bash\n\n# Extract the host where the server is running, and add the URL to the APIs\n\n[[ $HOST =~ ^https?://[^/]+ ]] && HOST=\"${BASH_REMATCH[0]}/api/v4/projects/\"\n\n\n# Look which is the default branch\n\nTARGET_BRANCH=`curl --silent \"${HOST}${CI_PROJECT_ID}\" --header\n\"PRIVATE-TOKEN:${PRIVATE_TOKEN}\" | python3 -c \"import sys, json;\nprint(json.load(sys.stdin)['default_branch'])\"`;\n\n\n# The description of our new MR, we want to remove the branch after the MR\nhas\n\n# been closed\n\nBODY=\"{\n    \\\"id\\\": ${CI_PROJECT_ID},\n    \\\"source_branch\\\": \\\"${CI_COMMIT_REF_NAME}\\\",\n    \\\"target_branch\\\": \\\"${TARGET_BRANCH}\\\",\n    \\\"remove_source_branch\\\": true,\n    \\\"title\\\": \\\"WIP: ${CI_COMMIT_REF_NAME}\\\",\n    \\\"assignee_id\\\":\\\"${GITLAB_USER_ID}\\\"\n}\";\n\n\n# Require a list of all the merge request and take a look if there is\nalready\n\n# one with the same source branch\n\nLISTMR=`curl --silent \"${HOST}${CI_PROJECT_ID}/merge_requests?state=opened\"\n--header \"PRIVATE-TOKEN:${PRIVATE_TOKEN}\"`;\n\nCOUNTBRANCHES=`echo ${LISTMR} | grep -o\n\"\\\"source_branch\\\":\\\"${CI_COMMIT_REF_NAME}\\\"\" | wc -l`;\n\n\n# No MR found, let's create a new one\n\nif [ ${COUNTBRANCHES} -eq \"0\" ]; then\n    curl -X POST \"${HOST}${CI_PROJECT_ID}/merge_requests\" \\\n        --header \"PRIVATE-TOKEN:${PRIVATE_TOKEN}\" \\\n        --header \"Content-Type: application/json\" \\\n        --data \"${BODY}\";\n\n    echo \"Opened a new merge request: WIP: ${CI_COMMIT_REF_NAME} and assigned to you\";\n    
exit;\nfi\n\n\necho \"No new merge request opened\";\n\n```\n\n\n### GitLab CI\n\n\nThe variables used in the script are passed to it by our `.gitlab_ci.yml`\nfile:\n\n\n```\n\nstages:\n    - openMr\n    - otherStages\n\nopenMr:\n    before_script: []   # We do not need any setup work, let's remove the global one (if any)\n    stage: openMr\n    only:\n      - /^feature\\/*/   # We have a very strict naming convention\n    script:\n        - HOST=${CI_PROJECT_URL} CI_PROJECT_ID=${CI_PROJECT_ID} CI_COMMIT_REF_NAME=${CI_COMMIT_REF_NAME} GITLAB_USER_ID=${GITLAB_USER_ID} PRIVATE_TOKEN=${PRIVATE_TOKEN} ./utils/autoMergeRequest.sh # The name of the script\n```\n\n\nAll these environment variables are set by GitLab itself, but the\nPRIVATE-TOKEN. A master of the project has to create it in its own profile\nand add to the project settings.\n\n\nTo create the personal token you can go to `/profile/personal_access_tokens`\non your GitLab instance, and then you add to your pipeline following this\nguide.\n\n\n### Ways to improve\n\n\nThe script is far from perfect.\n\n\nFirst of all, it has two API calls, one to take the list of MR and one to\ntake the default branch, to use it as target. Of course you can hardcode the\nvalue (in the end it shouldn’t change often), but hardcoding is always bad.\n\n\nAlso, it uses python3 to extract the name of the target branch – this is\njust one of many possible solutions, just use what is available on your\nsystem. Apart from that, the script doesn’t have any external dependency.\n\n\nThe other thing is how you need to set up the secret token to call the APIs.\nLuckily, GitLab’s developers are working on a [new\nway](https://gitlab.com/gitlab-org/gitlab-ce/issues/12729) to manage secret\ntokens.\n\n\n### Conclusion\n\n\nThis was a very small and very simple example about how much powerful\nContinuous Integration can be. 
It takes some time to set up everything, but\nin the long run it will save your team a lot of headache.\n\n\nIn fleetster we use it not only for running tests, but also for having\nautomatic versioning of the software and automatic deploys to testing\nenvironments. We are working to automate other jobs as well (building apps\nand publish them on the Play Store and so on).\n\n\nSpeaking of which, **do you want to work in a young and dynamic office with\nme and a lot of other amazing people?** Take a look at the [open positions\nat fleetster](https://www.fleetster.net/fleetster-team.html)!\n\n\nKudos to the GitLab team (and other guys who help in their free time) for\ntheir awesome work!\n\n\nIf you have any question or feedback about this blog post, please drop me an\nemail at riccardo@rpadovani.com :-)\n\n\nBye for now,\n\nA. & R.\n\n\nP.S: if you have found this article helpful and you’d like we write others,\ndo you mind to help us reaching the Ballmer’s peak and buy us a\n[beer](https://rpadovani.com/donations)?\n\n\nThis post originally appeared on\n[*rpadovani.com*](https://rpadovani.com/open-mr-gitlab-ci).\n\n\n## About the Guest Author\n\n\nRiccardo is a university student and a part-time developer at\n[fleetster](http://www.fleetster.net/). 
When not busy with university or\nwork, he likes to contribute to open-source projects.\n",[109,9,1165],{"slug":1269,"featured":6,"template":700},"how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci","content:en-us:blog:how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci.yml","How To Automatically Create A New Mr On Gitlab With Gitlab Ci","en-us/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci.yml","en-us/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci",{"_path":1275,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1276,"content":1282,"config":1290,"_id":1292,"_type":14,"title":1293,"_source":16,"_file":1294,"_stem":1295,"_extension":19},"/en-us/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab",{"title":1277,"description":1278,"ogTitle":1277,"ogDescription":1278,"noIndex":6,"ogImage":1279,"ogUrl":1280,"ogSiteName":686,"ogType":687,"canonicalUrls":1280,"schema":1281},"CI/CD pipeline: GitLab & Helm for Kubernetes Auto Deploy","One user walks through how he tried GitLab caching and split the job into multiple steps to get better feedback.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664472/Blog/Hero%20Images/gitlabflatlogomap.png","https://about.gitlab.com/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to create a CI/CD pipeline with Auto Deploy to Kubernetes using GitLab and Helm\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sergey Nuzhdin\"}],\n        \"datePublished\": \"2017-09-21\",\n      }",{"title":1283,"description":1278,"authors":1284,"heroImage":1279,"date":1286,"body":1287,"category":741,"tags":1288},"How to create a CI/CD pipeline with Auto Deploy to Kubernetes using GitLab and Helm",[1285],"Sergey Nuzhdin","2017-09-21","Recently, I started working on a few 
Golang\n[microservices](/topics/microservices/). I decided to try GitLab’s caching\nand split the job into multiple steps for better feedback in the UI.\n\n\n\u003C!-- more -->\n\n\nSince my previous\nposts[[1](http://blog.lwolf.org/post/how-to-build-tiny-golang-docker-images-with-gitlab-ci/)][[2](http://blog.lwolf.org/post/continuous-deployment-to-kubernetes-from-gitlab-ci/)]\nabout [CI/CD](/topics/ci-cd/), a lot has changed. I started using Helm\ncharts for packaging applications, and stopped using docker-in-docker in\ngitlab-runner.\n\n\nHere are a few of the main changes to my `.gitlab-ci.yml` file since my\nprevious post:\n\n\n* no docker-in-docker\n\n* using cache for packages instead of a prebuilt image with dependencies\n\n* splitting everything into multiple steps\n\n* autodeploy to staging environment using Helm, a package manager for\nKubernetes\n\n\n### Building Golang image\n\n\nSince Golang is very strict about the location of the project, we need to\nmake some adjustments to the CI job. This is done in the `before_script`\nblock. Simply create needed directories and link source code in there.\nAssuming that the official repository of the project is\n`gitlab.example.com/librerio/libr_files` it should look like this.\n\n\n```\n\nvariables:\n  APP_PATH: /go/src/gitlab.example.com/librerio/libr_files\n\nbefore_script:\n  - mkdir -p /go/src/gitlab.example.com/librerio/\n  - ln -s $PWD ${APP_PATH}\n  - mkdir -p ${APP_PATH}/vendor\n  - cd ${APP_PATH}\n```\n\n\nWith this in place, we can install dependencies and build our binaries. To\navoid the download of all packages on each build we need to configure\ncaching. Due to the strange caching rules of GitLab, we need to add vendor\ndirectory to both cache and artifacts. 
Cache will give us an ability to use\nit between build jobs and artifacts will allow us to use it inside the same\njob.\n\n\n```\n\n\ncache:\n  untracked: true\n  key: \"$CI_BUILD_REF_NAME\"\n  paths:\n    - vendor/\n\nsetup:\n  stage: setup\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - glide install -v\n  artifacts:\n    paths:\n     - vendor/\n\n```\n\n\nBuild step didn’t change, it’s still about building the binary. I add binary\nto artifacts.\n\n\n```\n\nbuild:\n  stage: build\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - cd ${APP_PATH}\n    - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o release/app -ldflags '-w -s'\n    - cd release\n  artifacts:\n    paths:\n     - release/\n```\n\n\n###  Test stage\n\n\nTo run golang tests with coverage reports I’m using the variation of [this\nshell\nscript](https://github.com/mlafeldt/chef-runner/blob/v0.7.0/script/coverage).\nIt runs all tests in project subdirectories and creates a [coverage\nreport](/blog/publish-code-coverage-report-with-gitlab-pages/). I changed it\na bit before putting into a gist. I exclude vendor directory from tests.\n\n\n* coverage regexp for gitlab-ci: `^total:\\s*\\(statements\\)\\s*(\\d+.\\d+\\%)`\n\n\n### Deploy stage\n\n\nI don’t use native GitLab’s integration with Kubernetes.\n\n\nFirst I thought about creating Kubernetes secrets and mounting it to the\ngitlab-runner pod. But it’s very complicated. You need to upgrade deployment\nevery time you want to add new Kubernetes cluster configurations. So I’m\nusing GitLab’s CI/CD variables with base64 encoded Kubernetes config. Each\nproject can have any number of configurations. 
The process is easy – create\nbase64 string from the configuration file and copy it to the clipboard.\nAfter this, put it into `kube_config` variable (name it whatever you like).\n\n\n`cat ~/.kube/config | base64 | pbcopy`\n\n\nIf you do not own a full GitLab installation, consider creating a Kubernetes\nuser with restricted permissions.\n\n\nThen on the deploy stage, we can decode this variable back into the file and\nuse it with kubectl.\n\n\n```\n\nvariables:\n  KUBECONFIG: /etc/deploy/config\n\ndeploy:\n  ...\n  before_script:\n    - mkdir -p /etc/deploy\n    - echo ${kube_config} | base64 -d > ${KUBECONFIG}\n    - kubectl config use-context homekube\n    - helm init --client-only\n    - helm repo add stable https://kubernetes-charts.storage.googleapis.com/\n    - helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/\n    - helm repo update\n```\n\n\nDeploy stage also covers the case when you have several versions of the same\napplication.\n\n\nFor example, you have two versions of API: v1.0 and v1.1. All you need to do\nis set `appVersion` in Chart.yaml file. 
Build system will check API version\nand either deploy or upgrade needed release.\n\n\n```\n\n- export API_VERSION=\"$(grep \"appVersion\" Chart.yaml | cut -d\" \" -f2)\"\n\n- export RELEASE_NAME=\"libr-files-v${API_VERSION/./-}\"\n\n- export DEPLOYS=$(helm ls | grep $RELEASE_NAME | wc -l)\n\n- if [ ${DEPLOYS}  -eq 0 ]; then helm install --name=${RELEASE_NAME} .\n--namespace=${STAGING_NAMESPACE}; else helm upgrade ${RELEASE_NAME} .\n--namespace=${STAGING_NAMESPACE}; fi\n\n```\n\n\n### tl;dr\n\n\n```\n\nHere is complete `.gitlab-ci.yaml` file for reference.\n\n\ncache:\n  untracked: true\n  key: \"$CI_BUILD_REF_NAME\"\n  paths:\n    - vendor/\n\nbefore_script:\n  - mkdir -p /go/src/gitlab.example.com/librerio/\n  - ln -s $PWD ${APP_PATH}\n  - mkdir -p ${APP_PATH}/vendor\n  - cd ${APP_PATH}\n\nstages:\n  - setup\n  - test\n  - build\n  - release\n  - deploy\n\nvariables:\n  CONTAINER_IMAGE: ${CI_REGISTRY}/${CI_PROJECT_PATH}:${CI_BUILD_REF_NAME}_${CI_BUILD_REF}\n  CONTAINER_IMAGE_LATEST: ${CI_REGISTRY}/${CI_PROJECT_PATH}:latest\n  DOCKER_DRIVER: overlay2\n\n  KUBECONFIG: /etc/deploy/config\n  STAGING_NAMESPACE: app-stage\n  PRODUCTION_NAMESPACE: app-prod\n\n  APP_PATH: /go/src/gitlab.example.com/librerio/libr_files\n  POSTGRES_USER: gorma\n  POSTGRES_DB: test-${CI_BUILD_REF}\n  POSTGRES_PASSWORD: gorma\n\nsetup:\n  stage: setup\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - glide install -v\n  artifacts:\n    paths:\n     - vendor/\n\nbuild:\n  stage: build\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - cd ${APP_PATH}\n    - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o release/app -ldflags '-w -s'\n    - cd release\n  artifacts:\n    paths:\n     - release/\n\nrelease:\n  stage: release\n  image: docker:latest\n  script:\n    - cd ${APP_PATH}/release\n    - docker login -u gitlab-ci-token -p ${CI_BUILD_TOKEN} ${CI_REGISTRY}\n    - docker build -t ${CONTAINER_IMAGE} .\n    - docker tag ${CONTAINER_IMAGE} ${CONTAINER_IMAGE_LATEST}\n    - docker push 
${CONTAINER_IMAGE}\n    - docker push ${CONTAINER_IMAGE_LATEST}\n\ntest:\n  stage: test\n  image: lwolf/golang-glide:0.12.3\n  services:\n    - postgres:9.6\n  script:\n    - cd ${APP_PATH}\n    - curl -o coverage.sh https://gist.githubusercontent.com/lwolf/3764a3b6cd08387e80aa6ca3b9534b8a/raw\n    - sh coverage.sh\n\ndeploy_staging:\n  stage: deploy\n  image: lwolf/helm-kubectl-docker:v152_213\n  before_script:\n    - mkdir -p /etc/deploy\n    - echo ${kube_config} | base64 -d > ${KUBECONFIG}\n    - kubectl config use-context homekube\n    - helm init --client-only\n    - helm repo add stable https://kubernetes-charts.storage.googleapis.com/\n    - helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/\n    - helm repo update\n  script:\n    - cd deploy/libr-files\n    - helm dep build\n    - export API_VERSION=\"$(grep \"appVersion\" Chart.yaml | cut -d\" \" -f2)\"\n    - export RELEASE_NAME=\"libr-files-v${API_VERSION/./-}\"\n    - export DEPLOYS=$(helm ls | grep $RELEASE_NAME | wc -l)\n    - if [ ${DEPLOYS}  -eq 0 ]; then helm install --name=${RELEASE_NAME} . --namespace=${STAGING_NAMESPACE}; else helm upgrade ${RELEASE_NAME} . 
--namespace=${STAGING_NAMESPACE}; fi\n  environment:\n    name: staging\n    url: https://librerio.example.com\n  only:\n  - master\n\n```\n\n\n_[How to create a CI/CD pipeline with Auto Deploy to Kubernetes using GitLab\nand\nHelm](http://blog.lwolf.org/post/how-to-create-ci-cd-pipeline-with-autodeploy-k8s-gitlab-helm/)\nwas originally published on Lwolfs Blog._\n\n\nPhoto by C Chapman on [Unsplash](https://unsplash.com/)\n",[831,1289,1165,9],"CD",{"slug":1291,"featured":6,"template":700},"how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab","content:en-us:blog:how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab.yml","How To Create A Ci Cd Pipeline With Auto Deploy To Kubernetes Using Gitlab","en-us/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab.yml","en-us/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab",{"_path":1297,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1298,"content":1304,"config":1310,"_id":1312,"_type":14,"title":1313,"_source":16,"_file":1314,"_stem":1315,"_extension":19},"/en-us/blog/humangeo-switches-jenkins-gitlab-ci",{"title":1299,"description":1300,"ogTitle":1299,"ogDescription":1300,"noIndex":6,"ogImage":1301,"ogUrl":1302,"ogSiteName":686,"ogType":687,"canonicalUrls":1302,"schema":1303},"HumanGeo switched from Jenkins to GitLab and cut costs by 1/3","Management overhead was bogging down the team at HumanGeo. 
GitLab freed up more than just cash.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680315/Blog/Hero%20Images/humangeo-switches-jenkins-to-gitlab.jpg","https://about.gitlab.com/blog/humangeo-switches-jenkins-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"HumanGeo switched from Jenkins to GitLab and cut costs by 1/3\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2017-11-14\",\n      }",{"title":1299,"description":1300,"authors":1305,"heroImage":1301,"date":1307,"body":1308,"category":741,"tags":1309},[1306],"William Chia","2017-11-14","\n\nAs a software development company, [HumanGeo](http://www.thehumangeo.com/) ships a lot of code. Specializing in geospatial visualization, they have clients in every sector from video game companies to government agencies. The ability to manage multiple projects, iterate quickly, and operate at scale is critical to their success. Over time, a robust DevOps practice has evolved to allow them to quicken their pace of innovation. But traditional tools in their stack, like Jenkins CI, haven’t be able to deliver.\n\n\u003C!-- more -->\n\nI recently caught up with [Justin Shelton](https://twitter.com/kwonstant), an engineer at HumanGeo, to talk about their expanded use of GitLab and how it’s improved both their workflow and budget. Here’s what he had to say:\n\n## Ease of use cuts admin time by 96%\n\n**William**: Can you tell me about the benefits you’ve seen from GitLab in terms of ease-of-use?\n\n**Justin**: Defining CI as code fits great with the \"Infrastructure as Code\" philosophy. We already push hard to have AWS environments expressed in CloudFormation templates, provisioning via Ansible, and so on. 
With GitLab CI, we can manage our CI pipeline the same way – with code.\n\nManaging YAML for Domain Specific Language (DSL) is way easier than managing Groovy for Jenkinsfiles (or most other config formats, for that matter). YAML is far more widespread and easy to understand, so more developers at junior and senior levels are exposed to it. The path to getting smart on writing GitLab CI DSL is much faster than coming up to speed on Groovy. While Jenkins is overwhelmingly customizable and familiar, it became Yet Another Thing to Manage™. In the end, GitLab CI shares a lot of the same (and in some cases more) configuration options.\n\nAs full stack engineers we do a lot of our own systems administration. Reducing our platform management burden is a huge plus. We used to spend a 5-6 hours each month managing Jenkins and keeping it running. Now, I might spend 10-15 minutes a month managing GitLab CI.\n\n## Flexible CI runners cuts costs 33%\n\n**William**: In [your blog post](http://blog.thehumangeo.com/gitlab-autoscale-runners.html) you shared that GitLab helped to cut infrastructure costs. How did that work in practice?\n\n**Justin**: The ability to integrate with handlers, like the Docker Machine interface I talk about in the post, is huge for helping to manage costs. We get resources when we need them, and can spin them down when we don't. That saves big money compared to maintaining a large instance and having to manage the JVM size and other factors whenever we run out of space. With Jenkins we used to run a dedicated m2.xlarge on AWS all the time for CI purposes. Now, with GitLab, we are able to run spot instances for only around 40 hours a week, resulting in about 1/3 cost savings. Engineers can change a few config items, and managers can see savings. 
Win!\n\n## Increasing the pace of innovation\n\n**William**: How else has GitLab adoption impacted your workflow?\n\n**Justin**: The speed of development is huge – new features get added every month, and I get genuinely excited to check out the release notes and update our instance every month. (Another perk is how simple this is, upgrading with two apt commands is as easy as it gets.)\n\n[Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) is the thing I'm most excited to dig into further that's come out recently. I'm excited about taking some of our bespoke release processes and tightening them up using this process. We're sticklers for code quality, so the Code Quality features were big, and we want to start utilizing Auto DevOps for canary releases as well.\n\n## Learn exactly how they did it\n\nAt HumanGeo using Jenkins CI proved to be costly in both time and money. Switching to GitLab reduced administration overhead, lowered spend, and increased development velocity. Justin wrote up a post to share all the technical details on [how HumanGeo scaled GitLab CI runners](http://blog.thehumangeo.com/gitlab-autoscale-runners.html). 
Check it out and let know us know what you think in the comments or on Twitter.\n\n\"[Pipe Dream](https://unsplash.com/photos/T7s_TnKO-dk)\" by [Sharosh Rajasekher](https://unsplash.com/@sharosh) on Unsplash\n{: .note}\n",[833,831,9],{"slug":1311,"featured":6,"template":700},"humangeo-switches-jenkins-gitlab-ci","content:en-us:blog:humangeo-switches-jenkins-gitlab-ci.yml","Humangeo Switches Jenkins Gitlab Ci","en-us/blog/humangeo-switches-jenkins-gitlab-ci.yml","en-us/blog/humangeo-switches-jenkins-gitlab-ci",{"_path":1317,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1318,"content":1324,"config":1330,"_id":1332,"_type":14,"title":1333,"_source":16,"_file":1334,"_stem":1335,"_extension":19},"/en-us/blog/introducing-auto-breakfast-from-gitlab",{"title":1319,"description":1320,"ogTitle":1319,"ogDescription":1320,"noIndex":6,"ogImage":1321,"ogUrl":1322,"ogSiteName":686,"ogType":687,"canonicalUrls":1322,"schema":1323},"Introducing Auto Breakfast from GitLab (sort of)","GitLab can't make you breakfast? This is what happens when you tell a GitLab team member whose favorite catchphrase is \"Challenge accepted.\"","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680054/Blog/Hero%20Images/auto-breakfast.jpg","https://about.gitlab.com/blog/introducing-auto-breakfast-from-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing Auto Breakfast from GitLab (sort of)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2018-06-29\",\n      }",{"title":1319,"description":1320,"authors":1325,"heroImage":1321,"date":1327,"body":1328,"category":741,"tags":1329},[1326],"Brendan O'Leary","2018-06-29","\n\nA big part of [GitLab's culture](/company/culture/) is [saying thanks](https://handbook.gitlab.com/handbook/communication/#say-thanks) to one another for doing a great job. 
That can be anything from helping with a tough technical problem to simply sharing a nice [coffee chat](/company/culture/all-remote/#coffee-chats) to break up the work day. One day a Sales team member thanked someone from Customer Success for a great demo of [GitLab CI/CD](/solutions/continuous-integration/). The customer commented afterwards, \"Okay, what doesn't GitLab do?\"\n\nPlenty of heart-themed emoji reactions followed. We've seen users do some pretty amazing things with GitLab CI/CD, from [ramping up to weekly mobile releases](/blog/continuous-integration-ticketmaster/) to [automating boring Git operations](/blog/automating-boring-git-operations-gitlab-ci/), to [saving 90 percent on EC2 costs](/blog/autoscale-ci-runners/). However, there was one thing we hadn't seen. So in addition to this love, the question also garnered a semi-sarcastic answer:\n\n> It won't make breakfast for you, unfortunately.\n\nNever one to let a Slack conversation go unnoticed, I replied with one of my favorite phrases:\n\n![Challenge Accepted](https://about.gitlab.com/images/blogimages/breakfast-challenge.png){: .shadow.center.medium}\n\nI have to admit that the fact that my status was [`:coffee_parrot:`](https://github.com/jmhobbs/cultofthepartyparrot.com/issues/55) could have been related to my enthusiastic reply...\n\n## The challenge\n\nAt the time I had only a vague idea of how I would accomplish this. Many suggestions about Internet of Things devices followed my comment. And while a toaster with a version of Linux that will never be patched was intriguing, I wanted to do something bigger.\n\nA few years ago some friends got together and bought me an [Anova Sous Vide](https://anovaculinary.com/), knowing that I loved to cook. What they failed to calculate was that having four kids in eight years was counterproductive to learning the time-tested [French cooking method of sous-vide](https://en.wikipedia.org/wiki/Sous-vide). 
As such, the tool has not had a whole lot of use in its time.\n\nHowever, at this point I thought of two things:\n\n1. I love a new sous-vide egg bite offering from a well-known coffee shop\n1. The Anova Sous Vide uses [bluetooth low energy (BLE)](https://en.wikipedia.org/wiki/Bluetooth_Low_Energy) to allow you to control it through an app\n\n## The recipe (culinary)\n\nWhile I did like the egg bites from a coffee shop that shall remain nameless, I don't have them all the time. I would give them a 5- _star_ rating, but they cost a few more _bucks_ then I’d like to spend 😉 So I found a [sous-vide egg bite recipe](https://recipes.anovaculinary.com/recipe/sous-vide-egg-bites-bacon-gruyere) on Anova's website.\n\n## The recipe (technology)\n\nOnce I had the recipe, all I needed was to reverse engineer the BLE connection, figure out how to get that to work from the command line, set up a project and get it integrated with GitLab CI/CD... no big deal. Luckily I found a fantastic project called PyCirculate that had already worked out a lot of the BLE connection issues with the Anova. It made me wonder if someone else had automated breakfast before... but I've yet to find them!\n\n![Ingredients...Pinterest picture](https://about.gitlab.com/images/blogimages/breakfast-pintrest.png){: .shadow.center.medium}\n\nNow that I had both recipes and all the ingredients, it was time to _*git*_ crackin'... (I can't tell you how happy I was when I thought of that joke. Did I mention I'm a dad?)\n\n### Setting up the breakfast pipeline\n\nOnce I had that project installed and working on my laptop, I uploaded the code to GitLab in the public repository in the [auto-breakfast group](https://gitlab.com/auto-breakfast/eggs/). Next, I installed [GitLab Runner](https://docs.gitlab.com/runner/) on a [RaspberryPi](https://www.raspberrypi.org/). I registered the Pi as a [specific runner](https://docs.gitlab.com/runner/register/) for my project. 
I used a [runner tag](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#use-tags-to-control-which-jobs-a-runner-can-run) so that I could ensure the cooking job only ran on a device with a Bluetooth connection.\n\n![Specific runner](https://about.gitlab.com/images/blogimages/breakfast-runner.png){: .shadow.small.right.wrap-text}\n\nWhen I run a pipeline on `auto-breakfast/eggs` it uses the RaspberryPi to execute and thus can create the BLE connection to the Anova. With the click of a button in GitLab, my breakfast pipeline was running. All I had to do was sit back, relax, and let GitLab CI/CD do all the work.\n\n![Auto Breakfast pipeline](https://about.gitlab.com/images/blogimages/breakfast-1.JPG){: .shadow.center.medium}\n\n## The results\n\nThe egg bites were great! I even modified the recipe with some great Kerrygold Irish whiskey cheddar cheese. However, I would say that it did take a little more effort to get things set up. However, now that it's done, I have a repeatable, single-button way to cook the recipe again (minus the egg cracking and food processing). 
Just like CI/CD with a `.gitlab-ci.yml` can help make software build and deployment more reliable and repeatable, it can also make a fantastic breakfast 😎\n\nNot pictured: A very messy kitchen and a very perplexed wife.\n{: .alert .alert-gitlab-purple}\n\n[Photo](https://unsplash.com/photos/I-ykyShydj0?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Leti Kugler on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[831,9],{"slug":1331,"featured":6,"template":700},"introducing-auto-breakfast-from-gitlab","content:en-us:blog:introducing-auto-breakfast-from-gitlab.yml","Introducing Auto Breakfast From Gitlab","en-us/blog/introducing-auto-breakfast-from-gitlab.yml","en-us/blog/introducing-auto-breakfast-from-gitlab",{"_path":1337,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1338,"content":1344,"config":1351,"_id":1353,"_type":14,"title":1354,"_source":16,"_file":1355,"_stem":1356,"_extension":19},"/en-us/blog/kali-linux-movingtogitlab",{"title":1339,"description":1340,"ogTitle":1339,"ogDescription":1340,"noIndex":6,"ogImage":1341,"ogUrl":1342,"ogSiteName":686,"ogType":687,"canonicalUrls":1342,"schema":1343},"Kali Linux: Growing Community Contributions with GitLab","Since moving to GitLab in 2019, Kali Linux has gone from company-only contributions to a growing number of community contributions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667243/Blog/Hero%20Images/open-source-community.png","https://about.gitlab.com/blog/kali-linux-movingtogitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab helped Kali Linux attract a growing number of community contributions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nuritzi Sanchez\"}],\n        \"datePublished\": \"2021-02-18\",\n      
}",{"title":1345,"description":1340,"authors":1346,"heroImage":1341,"date":1348,"body":1349,"category":694,"tags":1350},"How GitLab helped Kali Linux attract a growing number of community contributions",[1347],"Nuritzi Sanchez","2021-02-18","[Kali Linux](https://www.kali.org/) is a well-loved Debian-based Linux distribution aimed at advanced [Penetration Testing](https://en.wikipedia.org/wiki/Penetration_test) and Security Auditing. We sat down with Ben Wilson ([@g0tmi1k](https://twitter.com/g0tmi1k)), senior developer at Kali, to hear more about why Kali Linux moved to GitLab and see if they've noticed any changes to their project since adopting GitLab as their DevOps solution.\n\n## Why did you decide to move to GitLab?\n\nWe decided to move from Gitolite to GitLab around April 2019 to make it possible for our community to contribute to Kali. Our previous setup didn't allow anyone to sign up, so the community couldn't help out. Another complication was using a mixture of services such as Google Docs and Phabricator, and we wanted to condense our tool stack. We love that GitLab is a single platform for the whole software development lifecycle.\n\n>> One thing that was important for us is that we didn't want to reinvent the wheel. We tried to choose something open-source with advanced functionality, an active community, and a company behind it. GitLab ticked every box.\n\nAnother factor for our decision was that [GitLab's API is significantly more feature-rich than competitor APIs](https://docs.gitlab.com/ee/api/), which allowed us to automate and integrate into anything that we wanted. For example, we can fully automate the process of remotely forking a repository then apply our configurations.\n\nThat way, we don't have to download a git repository only to push it up again. This is a big time-saver for us and significantly simplifies the workflow. 
Some of the configuration that we can now automatically apply are:\n\n * Being able to drop the relationship between forks\n * Configure the default branch\n * Disable unused features for a repository (e.g., not everything requires their own wiki)\n * Populate a description for the repository\n * Set up CI paths\n * Set up email notification on any activity to our private mailing list\n\nWe take advantage of various open source tools that leverage GitLab's API, such as [Debian Salsa](https://www.phoronix.com/scan.php?page=news_item&px=Debian-Salsa-Beta). We can use these tools to automate things like updates to email distribution lists and our configuration of GitLab admin settings and repository structure. We contribute any changes we make to these tools back upstream so that other communities can leverage GitLab's API's power the way we do.\n\nAn additional perk to GitLab is its usability. The way you can organize projects makes it a more intuitive experience for people who want to contribute. For example, having sub-groups and projects allows us to keep a clean layout in a folder-like structure. For those interested, you can see how we've organized the [Kali project in GitLab](https://gitlab.com/kalilinux).\n\n## How are you using GitLab at Kali Linux?\n\nWe're using GitLab's [top-tier SaaS version](/pricing/), which is hosted on GitLab.com, thanks to the [GitLab for Open Source program](/solutions/open-source/). Using this version and hosting it on GitLab is easier for us because it's less infrastructure to maintain. We have many unique pieces of infrastructure so it's nice to reduce the load when we can. 
We're using a wide range of features to manage the entire Kali Linux project, consisting of 564 active repositories.\n\nSome of the most essential [GitLab features](/pricing/feature-comparison/) for us are:\n\n*   **Source Code Management**: We're using GitLab to host the source code to all our packages and build scripts and custom tools.\n*   **[Wiki](https://docs.gitlab.com/ee/user/project/wiki/#wiki)**: We use the wiki functionality for internal documentation. Markdown makes it easy for everyone to contribute.\n*   **[Project management](/solutions/agile-delivery/)**: We track tasks and short/long term goals with GitLab as well as the timelines for our project. We use issue tracking, threads, labels, milestones, weights, and everything else designed for project management.\n*   **[User Permissions](https://docs.gitlab.com/ee/user/permissions.html#permissions)**: We like the functionality of GitLab's user permissions, which allows us to have \"one-off\" users on specific projects as well as automatic expiration after a particular time.\n*   **[Security](https://docs.gitlab.com/ee/user/application_security/)**: As a cybersecurity-focused Linux distro, security is paramount to us. 
We like that [GitLab has 2FA and project access tokens](https://docs.gitlab.com/ee/security/).\n*   **[Analytics](https://docs.gitlab.com/ee/user/analytics/)**: We are still discovering the functionality here, but we like seeing user statistics around code review and contribution.\n*   **Performance**: We're able to use GitLab's [Content Delivery Network (CDN)](https://en.wikipedia.org/wiki/Content_delivery_network) for great performance across the globe.\n\nWe're hoping to leverage [GitLab's CI/CD features](/solutions/continuous-integration/) and the [container management capabilities](https://docs.gitlab.com/ee/user/packages/container_registry/) more regularly in the near future.\n\nWe're also looking to use [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) for hosting our website instead of our self-hosted WordPress instance. By using [Hugo](https://gohugo.io/hosting-and-deployment/hosting-on-gitlab/), we can write the content with a mixture of HTML and Markdown. Hugo makes it very simple, easy to update, and has straightforward change tracking. GitLab Pages then can serve up the [static output](https://docs.gitlab.com/ee/user/project/web_ide/index.html).\n\nThere were several problems we were facing with WordPress that made us consider moving away, such as plugins that weren't properly maintained, security issues that made us require VPN access to admin pages. The other benefits to the move are that static pages will load faster, and our community can help fix typos on our website through merge requests. Once we make the move to GitLab Pages, we'll start to make greater use of GitLab's CI/CD functions to statically generate the websites.\n\nAnother thing we're becoming more familiar with is all of GitLab's project management features. One of the reasons we chose GitLab instead of other DevOps tools is that it's a single platform for the whole software development lifecycle, and we're looking to use more of its features. 
Since we're on the top-tier SaaS plan, we have every functionality available to us and we're eager to make use of it.\n\n## What are some of the changes you've noticed in your open-source community since starting to use GitLab?\n\nThe most significant change is that we only allowed contributions from employees before moving to GitLab. Since the switch to GitLab, we've adopted a new mindset and now allow anyone to help out.\n\n>> GitLab's user-friendly design has made it easy for our community to get started, and we've started to receive merge requests from the public as well as bug reports and bug fixes.\n\nIt's been exhilarating to see these contributions land! We are working on increasing these contributions in 2021 with a \"Kali Summer of Code\" and are considering doing a giveaway for people who have made a significant contribution.\n\nWe've also experienced changes to our development practices. For example, we can now have more effective discussions about commit differences and can link to individual commits to pinpoint problems. It's easier to update items from the internal wiki, edit web pages, and merge requests. I also like that GitLab has a built-in automatic save feature to help when you're drafting something and either multitasking or on-the-go.\n\nFinally, GitLab's to-dos and long-term planning features allow us to plan ahead for the future of Kali development. For example, we've replaced ad-hoc solutions done by individuals via emails and to-do list text files on each person's computer since moving to GitLab.\n\n## What are some challenges you've had with implementing GitLab for your community? How did you overcome those challenges?\n\nDuring the switchover from the old system to GitLab, we discovered various things that were hardcoded.\n\nTo help with this, we automated a find and replace, and followed up with various manual searches to ensure that all links and references were located. This ended up taking about two hours. 
We also left the old web server up for a year, which pointed to the new URL structure to ensure that there weren't any missing links and references. We redid the layout of the site, so it took a while to recreate all the redirects.\n\nAnother challenge was the sheer size of Kali. We had to import roughly 1,000 repositories when we set up GitLab. We managed to migrate most of them in a day and completed the migration within a week once we managed to get the group structure in place. We set up separate groups for different access levels to repositories for build scripts, internal non-public files, Android, phone, build scripts, store, packages, recipes, tools, and websites.\n\nImporting other items (code packages, build scripts, and custom tools from our self-hosted git) took longer because they were in many different formats. When we did the import we cleaned up to determine which items were no longer in use and archived them. The next step was making sure our custom tools were hosted on GitLab and then configuring the tools and packages appropriately. Next, we imported several repositories. We also needed to create files that were not previously tracked in our repository. Finally, we converted our WordPress-based content to Markdown using an [open source project](https://github.com/lonekorean/wordpress-export-to-markdown), then manually verified and cleaned it up.\n\nWe chose not to carry over existing issues because we wanted to have a clean start. In general, we only imported what was important. Everything we ended up with is what we cared about and what we wanted to track.\n\n## What do you think GitLab is doing well in supporting open source communities, and what should GitLab do to improve in this area?\n\nWe really like that GitLab has an outreach program for open source projects with dedicated people for the job role. 
They actively contacted us to become a [GitLab Open Source Partner](/solutions/open-source/partners/) and we're glad to have joined as one!\n\nOne of the things that we appreciate about GitLab is that the company is open source. The transparency that comes with that allows us, and anyone else, to see the company's progress. GitLab is setting an example for how open source companies can work alongside their communities, and it's something we are learning from too.\n\n## What advice would you have for other open source communities that are looking to implement GitLab?\n\nThe sooner you make the switch, the easier and better! Once you move, you'll see that it's less work to maintain and there are more features to use.\n\nWhen beginning your migration, make sure to set up a test project first to help plan the structure ahead of doing the main project switch. Look up and explore features ahead of time so you know what GitLab can do rather than discover the functionality when using it. GitLab has a [GitLab Learn portal](/learn/), which we hear is going to continue to be improved to help with user education.\n\n## What are some of the new things on the horizon for Kali Linux?\n\n*   [KaBoxer](https://gitlab.com/kalilinux/tools/kaboxer): A framework to manage applications in containers on Kali\n*   New kali.org website using GitLab Pages\n*   Programs to increase community contributions to Kali\n\n## Is there anything else you'd like to share with us that we haven't asked you?\n\nWe have only scratched the surface of what GitLab has offered - and they keep putting in more features. We are planning on taking their upcoming training to make sure we are fully up-to-date on their offerings.\n\n## Last but certainly not least, we have heard a rumor that the founders of Kali are so dedicated to the project that they have Kali logo tattoos. Is this true?\n\nVery true! 
The original founders both have Kali tattoos, as do various current members.\n\nWe also have some pretty cute baby onesies that are a hit.\n\n![A baby in a Kali Linux onesie](https://about.gitlab.com/images/blogimages/kali_linux_baby.jpg){: .shadow.medium.center}\nKali Linux has some cute baby onesies. [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/) Ben Wilson\n{: .note.text-center}\n\n## About Kali Linux\n\n[Kali Linux](https://www.kali.org/) (formerly known as BackTrack-Linux) is a Debian-based Linux distribution aimed at advanced Penetration Testing and Security Auditing. Kali Linux contains several hundred tools targeted toward various information security tasks, such as Penetration Testing, Forensics, and Reverse Engineering. Kali Linux is a multi platform solution, accessible and freely available to information security professionals and hobbyists.\n",[697,269,9],{"slug":1352,"featured":6,"template":700},"kali-linux-movingtogitlab","content:en-us:blog:kali-linux-movingtogitlab.yml","Kali Linux Movingtogitlab","en-us/blog/kali-linux-movingtogitlab.yml","en-us/blog/kali-linux-movingtogitlab",{"_path":1358,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1359,"content":1365,"config":1373,"_id":1375,"_type":14,"title":1376,"_source":16,"_file":1377,"_stem":1378,"_extension":19},"/en-us/blog/leah-petersen-user-spotlight",{"title":1360,"description":1361,"ogTitle":1360,"ogDescription":1361,"noIndex":6,"ogImage":1362,"ogUrl":1363,"ogSiteName":686,"ogType":687,"canonicalUrls":1363,"schema":1364},"From motorcycle stunter to DevOps: Finding love for CI/CD","Switching to GitLab helped a newly minted DevOps engineer grasp the concept of CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663760/Blog/Hero%20Images/image-for-leah-post.jpg","https://about.gitlab.com/blog/leah-petersen-user-spotlight","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": 
\"Motorcycle stunter turned DevOps engineer says GitLab helped her learn to \\\"love\\\" CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-06-21\"\n      }",{"title":1366,"description":1361,"authors":1367,"heroImage":1362,"date":1369,"body":1370,"category":804,"tags":1371},"Motorcycle stunter turned DevOps engineer says GitLab helped her learn to \"love\" CI/CD",[1368],"Aricka Flowers","2018-06-21","\nWhen professional motorcycle stuntwoman turned developer Leah Petersen switched from Jenkins to GitLab, she was a bit nervous to say the least. Having only worked in tech for nine months, the [Samsung SDS](https://www.samsungsds.com/us/en/index.html) engineer was not enthused about the prospect of having to learn a new application after feeling like she had “just started to get competent” with Jenkins.\n\nAfter a self-described mini pity party, she dove into GitLab head first, jumping into a few big ticket projects to get a handle on the landscape. Within a few short months, Petersen was so impressed by her GitLab CI/CD experience that she felt the need to shout her newfound “love” for continuous integration and continuous delivery from the virtual mountaintop of [her blog](https://leahnp.github.io/2018/moving-from-jenkins-to-gitlab-CI/).\n\nWe recently met up with Petersen to learn more about her transition to the tech world and experience with GitLab.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Avx_RftRT_o\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Q & A with Leah Petersen, DevOps Engineer\n\n**Where do you work and what does your team do?**\n\nI work for a team in Samsung SDS called the Cloud Native Computing Team, and I'm [a DevOps engineer](https://about.gitlab.com/topics/devops/what-is-a-devops-engineer/). 
We deal primarily with containers in Kubernetes and helping companies modernize and move to the cloud. My team is super unique. We were kind of treated like an incubated startup within Samsung, so we're really given a lot of autonomy to make our own decisions.\n\nOur team was put together about five years ago, and Samsung really made a bet on Kubernetes being the future of orchestrating huge workloads in the cloud. Initially, we were focusing mainly on research and development, contributing to the Kubernetes community and learning who was a part of it, what their motives were, and how we could find our place in it. Over the last year, Samsung has really pivoted our role in the company, and we're looking at how we can help Samsung as a global organization move to Kubernetes and containers.\n\n**Where did you work before Samsung?**\n\nI was a motorcycle stunt rider before I became an engineer, and that career kind of organically grew out of my passion for motorcycles. I started stunting, loved the community and was able to meet people all over the country and travel. Being one of the few women who did it, I organically started getting calls for jobs and gigs. I thought, “If I can do this in my 20s and make this my full-time career, I'm definitely going to take a shot at it,” so I did.\n\nIt was an amazing opportunity and experience to travel the world and meet people all over this planet who are passionate about this crazy thing that I'm also passionate about. And I got to work with a lot of amazing brands and raise awareness about the sport that I love. So, I don't have any regrets about that and cherish the time that I got to spend on a motorcycle professionally.\n\n**How did you move from being a professional motorcycle stunter to a DevOps engineer?**\n\nI had been looking for a new career path and wasn't really sure what I was going to do. I knew that I wanted to build some tangible skills. 
I wanted skills that had a clear market value, and tech definitely provides that.\n\nI ended up taking an online coding course in Python, and had this “aha” moment where I realized, not only can I do this, which I didn't think was previously possible, but it's fun; I really like solving these problems. At that point I started taking more online courses and learning as much as I could for free. Then I ended up finding [Ada Developers Academy](https://www.adadevelopersacademy.org/), and that was the perfect segue into the industry.\n\n> I had this “aha” moment where I realized, not only can I do this, which I didn't think was previously possible, but it's fun\n\n**Can you describe how your experience has been as a woman in tech?**\n\nYou definitely get a lot of strange reactions being a woman in tech. Walking into a situation, oftentimes people are surprised you're an engineer. You'll get reactions like, “Oh, I thought you were a project manager,” or, “I thought you were a recruiter,” or whatever other stereotype that you brought into the room. That can be discouraging and makes you feel unwelcome in that space. But I think we need women in every part of tech: frontend, backend, DevOps, operations, everything. If your interest is in UX, go for that. But don't let all the men who've been in the industry for 25 years on the operations side of things scare you off either. I really think we need diverse minds and approaches to problems in the whole spectrum of it.\n\nSometimes I forget about the gender disparity in tech because my team, specifically, has a couple of really amazing women who I get to work with every day. So, I'm very fortunate. But I recently went to KubeCon in Copenhagen, and it's an amazing conference with so much energy, but it's a real wake up call when you see the gender disparity there. 
There's 4,000 guys walking around and you feel like you stick out [or] when you're sitting in an auditorium, look around and realize, “Oh, I'm the only lady here.” It's something that you can't look away from.\n\n**Why did you decide to go into DevOps engineering?**\n\nIn my boot camp classes we were focusing on web development and building Ruby on Rails and Node.js apps. We each had an opportunity to do an internship at companies in Seattle that support the Ada program. Samsung was one of them, and they came in to do a presentation about their involvement in open source and Kubernetes. I had no idea what they were talking about, but Kubernetes and the momentum of the open source community was really appealing to me. So I took a chance and picked Samsung, dove right in, and found my way as I went along. I'm really happy that I chose Kubernetes and to specialize in the cloud.\n\n>Kubernetes and the momentum of the open source community was really appealing to me. So I took a chance, dove right in, and found my way as I went along\n\n**How did you get started with GitLab CI/CD? And how would you describe your transition to the application?**\n\nI always felt like I was fighting with the CI platform we were on prior to GitLab. It was never really functioning how we wanted it to, and something was always kind of failing. The whole reason you have CI/CD is to get visibility into what's happening with your code, right? You want to run your code through this pipeline and make sure there are no bugs, that you’re packaging it correctly and putting it in the places that you need it to be in production. It's this hugely critical component of going from the developer's computer to the world; that's the pipeline. So you really need the visibility to see what is happening every step of the way.\n\nOn the old system, I felt that I just didn't have that visibility. 
I was digging for the problems and not able to understand where they were coming from, where they were originating from, why they were happening or how to fix them. I feel like GitLab definitely does a great job of assisting the user in finding the origin of a problem, tracing that step back and making it clear where your issues are and when you're having success.\n\n**How has using GitLab impacted your career and workflow?**\n\nThere's a lot of talk about accessibility and user experience in tech. And we all know what it's like to have a bad user experience with a piece of technology; it's the most frustrating thing in the entire world. As a developer, you deal with lots of different tech every single day. When I started using GitLab about a year and a half into my career, it was certainly the first platform where I was like, ‘I feel so at home here. Everything’s fluid. I can find where everything is. I understand what everything is.’ There aren't these big black holes of confusion that have me asking, “Why does this exist and what am I doing here?’”\n\nWith GitLab, everything is just this cheery, happy place. 
And I really appreciate how it has now set the bar for me when it comes to the way in which a technology should function when I’m working with it.\n\nCover photo by [Rendiansyah Nugroho](https://unsplash.com/photos/JUePy_-uOSI) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[1372,784,939,697,807,269,830,9],"careers",{"slug":1374,"featured":6,"template":700},"leah-petersen-user-spotlight","content:en-us:blog:leah-petersen-user-spotlight.yml","Leah Petersen User Spotlight","en-us/blog/leah-petersen-user-spotlight.yml","en-us/blog/leah-petersen-user-spotlight",{"_path":1380,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1381,"content":1387,"config":1392,"_id":1394,"_type":14,"title":1395,"_source":16,"_file":1396,"_stem":1397,"_extension":19},"/en-us/blog/lee-tickett-my-gitlab-journey",{"title":1382,"description":1383,"ogTitle":1382,"ogDescription":1383,"noIndex":6,"ogImage":1384,"ogUrl":1385,"ogSiteName":686,"ogType":687,"canonicalUrls":1385,"schema":1386},"From user, to advocate, to contributor: my GitLab journey","Three years (as a user and as a contributor) with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681735/Blog/Hero%20Images/cover_photo.jpg","https://about.gitlab.com/blog/lee-tickett-my-gitlab-journey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"From user, to advocate, to contributor: my GitLab journey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lee Tickett\"}],\n        \"datePublished\": \"2020-11-13\",\n      }",{"title":1382,"description":1383,"authors":1388,"heroImage":1384,"date":1389,"body":1390,"category":718,"tags":1391},[1140],"2020-11-13","{::options parse_block_html=\"true\" /}\n\n\n\n\nI have had a passion for technology since before I can remember. 
Thirteen\nyears ago I took the plunge, quit my day job, and started an IT development\nand support company called [Tickett Enterprises\nLimited](https://www.tickett.net). For the last three years, GitLab has been\na part of my journey.\n\n\n## 3 Years Ago \n\nWe were (and still are) using a helpdesk system we built ourselves. It does\nexactly what we need it to do - and any time it doesn’t, we change it. The\nmost important feature of the system is reporting. Specifically,\nfacilitating our monthly billing process; with a click of a button, we\ngenerate timesheets and invoices for all of our clients.\n\n\nThough I was aware of Git (and GitHub), I had not heard of GitLab. We were\nusing SVN in its most basic form (single repository for all projects and no\nbranching), with an integration so all commits would create notes in our\nhelpdesk.\n\n\n## 2.5 Years Ago\n\nWe decided that SVN was no longer fit for purpose. Our top issues were: \n\n* never knowing whether the code in our repository matched what was deployed\n\n* not being able to work collaboratively on projects\n\n* feature/knowledge limitations\n\n* Git was the industry standard \n\n\nWhile most of these issues were due to the way we were using SVN, we were\nkeen to adopt a more popular system. I don’t remember how I found GitLab,\nbut I did, and spun up a local on-prem instance of Community Edition (CE)\nusing separate projects/repositories and basic branching. If you are\nconsidering running a local instance, I recommend the [Bitnami\nappliance/.ova](https://bitnami.com/stack/gitlab).\n\n\nIt took some time to get used to local vs remote and to remember to push as\nwell as commit, but we picked it up pretty quickly.\n\n\n## 2 Years Ago\n\nWe wanted to use GitLab to help us improve our processes so we:\n\n* built a little UI for project creation (using the GitLab API). 
This\nensures new projects fit our naming standards, contain our standard template\nfiles, have our standard master/test/dev branches, contain the relevant\nmembers, and use our webhooks\n\n* recreated the helpdesk integration we had with SVN (every commit and\ncomment is replicated as a note on our helpdesk)\n\n* unaware of GitLab EE, we created a custom merge request approval process\nusing webhooks. Our master branch is always protected - a merge request\nrequires 2 approvals from 2 distinct reviewers (one for code and one for\nfunctionality)\n\n\n## 1.5 Years Ago\n\nA bit late to the party, but finally we set up the GitLab runner to automate\nour build, spin up our database, execute our unit tests and report test\ndetails and code coverage. GitLab CI for .NET was not as well documented as\nother use cases leading to a lot of trial and error when setting up the\nrunner.\n\n\nWe are using the Windows runner configured to use a standard shell (which I\nthink is no longer supported). We will either be moving to powershell on\nwindows or possibly using docker images. 
Here’s a sample .gitlab-ci.yml\n\n\n```yml\n\nstages:\n  - build\n  - test\n\nvariables:\n  CI_DEBUG_TRACE: \"false\"\n  ASSEMBLY_VERSION: \"1.0.4\"\n  \nbuild:\n stage: build\n script:\n  - 'C:\\Windows\\Microsoft.NET\\Framework\\v4.0.30319\\nuget restore'\n  - '\"C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\BuildTools\\MSBuild\\15.0\\bin\\msbuild\" /t:Restore,Clean,ReBuild /t:Database:Publish /p:Configuration=Debug;Platform=\"Any CPU\" /p:SqlPublishProfilePath=Database.publish.xml'\n  - 'ping 192.168.99.99 -n 1 -w 10000 2>nul || type nul>nul'\n artifacts:\n  paths:\n   - Tests/bin/\n\ntest:\n stage: test\n script:\n  - 'c:\\GitLab-Runner\\opencover\\OpenCover.Console.exe -returntargetcode:1000 -filter:\"+[*]* -[nunit*]* -[*Tests*]*\" -register -target:\"C:\\Program Files (x86)\\NUnit.org\\nunit-console\\nunit3-console.exe\" -targetargs:\"Tests\\Tests.csproj --result=testresult.xml;transform=C:\\gitlab-runner\\nunit3-junit.xslt\"'\n coverage: '/^Visited Branches .*(\\(\\d+\\.?\\d*\\))/'\n dependencies:\n  - build\n artifacts:\n  reports:\n   junit: testresult.xml\n```\n\n\nWe were building another customization to allow us to search for code across\nall repositories. Unfortunately, we hit a limitation because the API did not\nallow searching anything but the default branch.\n\n\nAt this point, while Googling for help getting CI up and running, I learned\nthat GitLab is open-source. So I thought maybe I could extend the API to\nsupport searching any branch. This lead to [my first\ncontribution](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/28069).\n\n\n## 1 Year Ago\n\nAt this point, I was completely new to all of the technologies, techniques,\nand best practices used by GitLab but found myself participating in my first\n[GitLab hackathon](https://about.gitlab.com/community/hackathon/). Somehow,\nI managed to take joint first prize!\n\n\nMy first few contributions were achieved by modifying my production GitLab\ninstallation (not ideal). 
So it was time to get the [GitLab Development Kit\n(GDK)](https://gitlab.com/gitlab-org/gitlab-development-kit) up and running.\nThis was certainly not without its challenges (many of which I suspect stem\nfrom me being in the minority of GitLab contributors running Windows).\n\n\nI have since contributed to the [GDK\nproject](https://gitlab.com/gitlab-org/gitlab-development-kit) and joined\nthe GDK office hour calls to help shape the way forward and resolve some of\nthe problems and frustrations.\n\n\nAt this point, I was learning a lot. Not just about the tools and languages\nbut about the best practices and work ethos within the GitLab team. Better\nyet, I was able to start taking some of these learnings back to the office.\n\n\n## 0.5 Years Ago\n\nI attended GitLab Commit - London 2019. This really helped to confirm my\nsuspicions; we are only scraping the surface of GitLab's capabilities.\n\n\nOn a few occasions, I wondered whether GitLab may not be a good fit for my\ncompany as I watched huge companies like Porsche and Goldman Sachs present.\nA [presentation](https://www.youtube.com/watch?v=t0Eh1sq9r5s) by Huss\nEl-Sheikh from startup 9fin helped ease my concerns.\n\n\nAround this time, I moved from Windows to Ubuntu to make it easier to work\nwith GDK.\n\n\nI continued to learn a lot from my contributions, feedback, and interactions\nwith the GitLab team, again applying what I could back in the office. Much\naround the languages/technologies I hadn’t previously worked with (namely\nruby, postgres and vue), but also other takeaways such as:\n\n* when carrying out code reviews ask questions rather than give instructions\n(“what do you think about x?” is more productive than “change this to y”)\n\n* GitLab CI is capable of automating a lot of what we currently do by hand\n(e.g. 
code review for best practices)\n\n* always try to add tests when making code changes\n\n\nI am a firm believer of documenting processes, decisions, and rationale.\nThere’s nothing worse than someone saying “we do it this way” without being\nable to back that up with reasoning. With that in mind, we implemented Merge\nRequest Templates to ensure our team was consistent in our approach to\ncoding, testing, and releasing.\n\n\nBy now our development team had plenty of experience with GitLab and we were\nstarting to move our support team over. To help our team leads monitor merge\nrequests, we adopted 2 simple departmental labels (`Support`/`Development`)\nand used our webhook engine to ensure every MR is automatically labelled.\n\n\n## Today / What’s Next\n\nIn preparation for a transition to .NET core, deprecation of the Windows\nshell runner and a desire to start testing our frontend (web), I started\nputting a CI script together using docker and the\nmcr.microsoft.com/dotnet/core/sdk:latest image. 
The .gitlab-ci.yml looks\nlike;\n\n\n```yml\n\nstages:  \n  - build\n  - test\n\nvariables:\n  CI_DEBUG_TRACE: \"false\"\n  ASSEMBLY_VERSION: \"1.0.1\"\n\nbuild:\n stage: build\n tags:\n  - docker\n script:\n  - 'dotnet build'\n\ntest:\n stage: test\n tags:\n  - docker\n script:\n  - 'nohup dotnet run --project Web &'\n  - 'apt-get update'\n  - 'apt-get install -y unzip'\n  - 'wget https://chromedriver.storage.googleapis.com/83.0.4103.14/chromedriver_linux64.zip'\n  - 'unzip chromedriver_linux64.zip -d ~/'\n  - 'rm chromedriver_linux64.zip'\n  - 'mv -f ~/chromedriver /usr/local/bin/chromedriver'\n  - 'chown root:root /usr/local/bin/chromedriver'\n  - 'chmod 0755 /usr/local/bin/chromedriver'\n  - 'wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -'\n  - 'sh -c ''echo \"deb https://dl.google.com/linux/chrome/deb/ stable main\" >> /etc/apt/sources.list.d/google.list'''\n  - 'apt-get update'\n  - 'apt-get install -y google-chrome-stable'\n  - 'dotnet test -l:trx Tests/Tests.csproj /p:CollectCoverage=true'\n coverage: '/Total\\s*\\|.*\\|\\s(\\d+\\.?\\d*)%\\s*\\|.*\\|/'\n```\n\n\nAnd the tests look something like;\n\n\n```c#\n    public class UiTests : IDisposable\n    {\n        private readonly Process _webServerProcess;\n        private readonly IWebDriver _driver;\n\n        [Fact]\n        public void ClickNavPrivacyPolicy()\n        {\n            _driver.Navigate()\n                .GoToUrl(\"http://localhost:5000/\");\n\n            var link = _driver.FindElement(By.LinkText(\"Privacy\"));\n            link.Click();\n\n            Assert.Equal(\"http://localhost:5000/Home/Privacy\", _driver.Url);\n        }\n\n        public UiTests()\n        {\n            ChromeOptions chromeOptions = new ChromeOptions();\n            chromeOptions.AddArguments(\"headless\", \"no-sandbox\");\n            _driver = new ChromeDriver(chromeOptions);\n\n            if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux)) return;\n\n            
_webServerProcess = new Process\n            {\n                StartInfo = {\n                    WorkingDirectory = Path.Combine(System.AppDomain.CurrentDomain.BaseDirectory, \"..\", \"..\", \"..\", \"..\", \"Web\"),\n                    FileName = $\"dotnet.exe\",\n                    Arguments = \" run\",\n                    UseShellExecute = true,\n                }\n            };\n            _webServerProcess.Start();\n        }\n\n        private void KillWebServer()\n        {\n            if (_webServerProcess != null && !_webServerProcess.HasExited)\n            {\n                _webServerProcess.Kill();\n            }\n        }\n\n        public void Dispose()\n        {\n            _driver.Dispose();\n            KillWebServer();\n        }\n    }\n```\n\n\nYou can see some conditional code in there which allows Selenium tests to\nwork both locally on our development machines and remotely on our GitLab\nrunner. If you have a better way of achieving this, please leave a comment.\nI would love to chat and learn!\n\n\nI also want to start introducing some linting like we see in the GitLab\nproject to enforce rules around code formatting (spaces, carriage returns,\nindentation, etc.). I have started to look at JetBrains Resharper (R#)\ncommand-line but haven’t had enough time to implement it yet. Ideally. I\nwould like to start with just a rule or two and then slowly introduce more,\nbut it looks quite tricky to take this approach. Please let me know if\nyou’ve been able to achieve this!\n\n\nI would also like to lose our helpdesk and start using GitLab issues,\nservice desk, timelogs, etc. I am working on identifying the gaps and\nworking with the product managers to understand whether it is realistic to\nfill those gaps within the GitLab product. 
Alternatively, I will be looking\nto build some additional “bolt-ons” using webhooks and the API.\n\n\nWhile investigating gaps, I stumbled upon the [GitLab-Triage\nproject](https://gitlab.com/gitlab-org/gitlab-triage) and I expect we'll use\nthis to automate various workflows. I managed to help close a few issues and\neven create a few additional features which would make it work for us by\n[contributing to the GitLab-Triage\nproject](https://gitlab.com/gitlab-org/gitlab-triage/-/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&author_username=leetickett).\n\n\nWe also added more labels (`needs code review` & `needs functional review`)\nfor our merge request approval process now. We can see where we are and what\nneeds to be done at a glance. We previously relied on an MR checklist that\nwe are deprecating.\n\n\n![Merge request\nchecklist](https://about.gitlab.com/images/blogimages/lee-tickett-my-gitlab-journey/mr_checklist.png)\n\n\n![Merge requests with\nlabels](https://about.gitlab.com/images/blogimages/lee-tickett-my-gitlab-journey/merge_requests_with_labels.png)\n\n\n## Contributing to GitLab \n\n\nI am very proud to have joined the GitLab Core Team. Thanks to everyone who\nhas held my hand and patiently assisted me with contributions. \n\n\nWith the release of Microsoft Windows Subsystem for Linux v2, I have gone\nback to running Windows on my laptop with GDK running in Ubuntu on WSL2.\nThis is working brilliantly for me at the moment (the way Visual Studio Code\nhandles things especially is really cool).\n\n\nI now have 95 [merged merge\nrequests!](https://gitlab.com/dashboard/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&author_username=leetickett)\nand have been helping several others get started contributing (getting GDK\nup and running etc). Once this crazy pandemic is over and we can start to\nsocialise again, I would like to try and start some sort of local\nmeetup/group.\n\n\nI would like to help make it easier to connect GitLab users. 
I have visions\nof a mechanism to search for others based:\n\n* the size of their user base \n\n* the languages they are using\n\n* the feature they are using\n\n\nAt present, we have several tools (Gitter, Issues, Forum etc) but there is a\nstrong reliance on being engaged and stumbling on questions/support\nrequests. I suspect many of us would be happy to have other users reach out\ndirectly.\n\n\nIf you need any more information around:\n\n* getting your development environment/tools setup on Windows 10\n\n* getting CI working with .NET and SQL Server projects\n\n* building customisations using GitLab webhooks and API\n\n\n...or would like to see a demo of anything discussed above, I would be happy\nto oblige!\n\n\nI would love to connect with others who are either looking to, or already\nusing GitLab for:\n\n* .NET projects\n\n* customer helpdesk \n\n* customer billing (using timelogs)\n\n\nThanks for reading! Here's a picture of me and the family repping with our\nGitLab merch!\n\n\n![The tickett family repping\nGitLab](https://about.gitlab.com/images/blogimages/lee-tickett-my-gitlab-journey/landing_page.png)\n",[109,269,9,918,697,830,807],{"slug":1393,"featured":6,"template":700},"lee-tickett-my-gitlab-journey","content:en-us:blog:lee-tickett-my-gitlab-journey.yml","Lee Tickett My Gitlab Journey","en-us/blog/lee-tickett-my-gitlab-journey.yml","en-us/blog/lee-tickett-my-gitlab-journey",{"_path":1399,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1400,"content":1406,"config":1412,"_id":1414,"_type":14,"title":1415,"_source":16,"_file":1416,"_stem":1417,"_extension":19},"/en-us/blog/live-from-commit-london",{"title":1401,"description":1402,"ogTitle":1401,"ogDescription":1402,"noIndex":6,"ogImage":1403,"ogUrl":1404,"ogSiteName":686,"ogType":687,"canonicalUrls":1404,"schema":1405},"Live from Commit London","We're having a packed day at our first European user conference. 
Watch this space for the latest news.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678442/Blog/Hero%20Images/londoncommit.png","https://about.gitlab.com/blog/live-from-commit-london","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Live from Commit London\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-10-09\",\n      }",{"title":1401,"description":1402,"authors":1407,"heroImage":1403,"date":1408,"body":1409,"category":301,"tags":1410},[935],"2019-10-09","\n**9:30AM BST** – GitLab CEO [Sid Sijbrandij](/company/team/#sytses) told attendees at our first European user conference that support for Amazon Web Services' Elastic Kubernetes Service (EKS) will be available later this year. Sid also underscored the importance of the European market. Almost one-third of GitLab's business comes from Europe and 42% of our customers are based in Europe.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-partner=\"tweetdeck\">\u003Cp lang=\"en\" dir=\"ltr\">Gitlab Commit London warming up with breakfast networking 🤜🏻💥🚀 cc \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/gitlabcommit?src=hash&amp;ref_src=twsrc%5Etfw\">#gitlabcommit\u003C/a> \u003Ca href=\"https://t.co/ke7nsNE7pO\">pic.twitter.com/ke7nsNE7pO\u003C/a>\u003C/p>&mdash; James McLeod (@mcleo_d) \u003Ca href=\"https://twitter.com/mcleo_d/status/1181849833604337667?ref_src=twsrc%5Etfw\">October 9, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n**10:00AM BST** – Speed matters, particularly at Porsche AG. 
Software engineers Alberto Gisbert and Dennis Menge told Commit 2019 attendees how a quest to improve collaboration, reduce tool complexity and achieve a single source of truth led the car manufacturer to GitLab. Porsche started using GitLab in Europe initially, but quickly realized it needed to expand to China, Porsche's largest market, as well. One year into the project, Porsche has more than 660 repositories with more than 250 active users. All told, more than 80,000 pipelines have been triggered.\n\nUp next, Capgemini UK's [Matt Smith](https://twitter.com/Harmelodic) shared how to go from [Zero to K8s: As Fast As Possible](https://gitlabcommit2019london.sched.com/event/UL5X/zero-to-k8s-as-fast-as-possible):\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-conversation=\"none\">\u003Cp lang=\"en\" dir=\"ltr\">Britney mic&#39;d up!\u003Cbr>\u003Cbr>On stage in half an hour 😬\u003Ca href=\"https://twitter.com/hashtag/GitLabCommit?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabCommit\u003C/a> \u003Ca href=\"https://t.co/ivQ1V9waBW\">pic.twitter.com/ivQ1V9waBW\u003C/a>\u003C/p>&mdash; Matt Smith (@Harmelodic) \u003Ca href=\"https://twitter.com/Harmelodic/status/1181851029048102912?ref_src=twsrc%5Etfw\">October 9, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nCoding in the blink of an eye!\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-partner=\"tweetdeck\">\u003Cp lang=\"en\" dir=\"ltr\">.\u003Ca href=\"https://twitter.com/Harmelodic?ref_src=twsrc%5Etfw\">@Harmelodic\u003C/a> is talking faster than \u003Ca href=\"https://twitter.com/hashtag/terraform?src=hash&amp;ref_src=twsrc%5Etfw\">#terraform\u003C/a> can deploy things :joy: Great live coding :sunglasses: \u003Ca 
href=\"https://twitter.com/hashtag/gitlabcommit?src=hash&amp;ref_src=twsrc%5Etfw\">#gitlabcommit\u003C/a> \u003Ca href=\"https://t.co/LS0t3GdqHx\">pic.twitter.com/LS0t3GdqHx\u003C/a>\u003C/p>&mdash; Michael Friedrich (@dnsmichi) \u003Ca href=\"https://twitter.com/dnsmichi/status/1181862263680053248?ref_src=twsrc%5Etfw\">October 9, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n**11:30AM BST** – How to shift left and bring security more firmly into development was the topic of a mid-morning panel discussion at Commit.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-conversation=\"none\">\u003Cp lang=\"en\" dir=\"ltr\">\u003Ca href=\"https://twitter.com/Shetti?ref_src=twsrc%5Etfw\">@Shetti\u003C/a> of \u003Ca href=\"https://twitter.com/VMware?ref_src=twsrc%5Etfw\">@VMware\u003C/a> leads a panel discussion on security in the software development life cycle with Jeremy Guido, \u003Ca href=\"https://twitter.com/plafoucriere?ref_src=twsrc%5Etfw\">@plafoucriere\u003C/a> and \u003Ca href=\"https://twitter.com/simasotiris?ref_src=twsrc%5Etfw\">@simasotiris\u003C/a>.\u003Ca href=\"https://twitter.com/hashtag/GitLabCommit?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabCommit\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/womenintech?src=hash&amp;ref_src=twsrc%5Etfw\">#womenintech\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/womeninstem?src=hash&amp;ref_src=twsrc%5Etfw\">#womeninstem\u003C/a> \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/womenwhocode?src=hash&amp;ref_src=twsrc%5Etfw\">#womenwhocode\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/git?src=hash&amp;ref_src=twsrc%5Etfw\">#git\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/DevOps?src=hash&amp;ref_src=twsrc%5Etfw\">#DevOps\u003C/a> \u003Ca 
href=\"https://twitter.com/hashtag/opensource?src=hash&amp;ref_src=twsrc%5Etfw\">#opensource\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/security?src=hash&amp;ref_src=twsrc%5Etfw\">#security\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/sdlc?src=hash&amp;ref_src=twsrc%5Etfw\">#sdlc\u003C/a> \u003Ca href=\"https://t.co/lQeQYelTVv\">pic.twitter.com/lQeQYelTVv\u003C/a>\u003C/p>&mdash; Suze Shardlow at #GitLabCommit (@SuzeShardlow) \u003Ca href=\"https://twitter.com/SuzeShardlow/status/1181874495268773888?ref_src=twsrc%5Etfw\">October 9, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nSotiraki Sima, executive director in technology risk at Goldman Sachs, stressed the benefits of starting small and being prepared to continually adapt to new technologies and new tools. [Jeremy Guido](https://fr.linkedin.com/in/jeremyguido), backend engineer with My Data Models, said designating a security leader in a development team can help to make everyone feel more like a stakeholder. And [Philippe Lafoucriere](https://about.gitlab.com/company/team/#plafoucriere), distinguished engineer at GitLab, stressed the role of automation in scaling security throughout the SDLC. The bottom line: it's a process so take it a step at a time.\n\n**1:00PM BST** – What's next for the GitLab tool? [Eric Brinkman](/company/team/#ebrinkman), director of product, dev products, outlined our technology roadmap. He began with Meltano, a six-person startup located within GitLab that is focused on bringing DevOps best practices to DataOps. Eric announced that today [version 1.0 of Meltano](https://meltano.com/blog/meltano-graduates-to-version-1-0/) is available.\n\nAnd that was just the beginning. Value stream management will be coming soon to Manage, Eric said, so users will be able to track efficiency metrics and ultimately receive recommendations. 
Plan stage will add high and low release requirements related to code and test. In Create, our source code management and code review will get an upgrade with an improved Web IDE and eventually the ability to do live coding. Verify will receive load testing runs by default and Secure will get [fuzzing](/direction/secure/dynamic-analysis/fuzz-testing/) as a built-in part of security testing. Changes to Release will mean automatically staged rollbacks and Configure will invest in run books to improve mean time to recovery. Protect will continue to invest in real-time threat detection capabilities. And finally auto remediation is on the horizon so at some point the largely manual (and often annoying) job of finding and fixing vulnerabilities will be a thing of the past. \"This is something that can truly bring dev, sec and ops together,\" Eric said.\n\nNote: All sessions from Commit London are being recorded and will be available on our [YouTube channel](https://youtube.com/gitlab) in 24-48 hours.\n{: .alert.alert-info}\n",[269,279,1411,1020,9,939],"inside GitLab",{"slug":1413,"featured":6,"template":700},"live-from-commit-london","content:en-us:blog:live-from-commit-london.yml","Live From Commit London","en-us/blog/live-from-commit-london.yml","en-us/blog/live-from-commit-london",{"_path":1419,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1420,"content":1426,"config":1432,"_id":1434,"_type":14,"title":1435,"_source":16,"_file":1436,"_stem":1437,"_extension":19},"/en-us/blog/making-ci-easier-with-gitlab",{"title":1421,"description":1422,"ogTitle":1421,"ogDescription":1422,"noIndex":6,"ogImage":1423,"ogUrl":1424,"ogSiteName":686,"ogType":687,"canonicalUrls":1424,"schema":1425},"Making CI/CD easier with GitLab","The team at Trek10 tries to consider the need for automation and repeatability with everything they do. 
One team member gives a crash course in GitLab CI/CD and explains how they use it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680423/Blog/Hero%20Images/making-ci-easier-with-gitlab.jpg","https://about.gitlab.com/blog/making-ci-easier-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Making CI/CD easier with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rob Ribeiro\"}],\n        \"datePublished\": \"2017-07-13\",\n      }",{"title":1421,"description":1422,"authors":1427,"heroImage":1423,"date":1429,"body":1430,"category":741,"tags":1431},[1428],"Rob Ribeiro","2017-07-13","At [Trek10](https://www.trek10.com/), we always try to consider the need for\nautomation and repeatability with everything that we do. That’s why we focus\non using tools like CloudFormation, [Serverless](/topics/serverless/), and\nCI/CD, as well as building other tools. Recently, I was tasked with doing\nvarious maintenance tasks on a number of internal tools/projects. Some\nneeded upgrades from Node.js 0.10, some needed code fixes, and most needed\nCI/CD. Today, we’re just going to focus on the CI/CD part.\n\n\n\u003C!-- more -->\n\n\nIn spite of my past experience with Jenkins and TeamCity and our team’s\nexperience with AWS (CodePipeline/CodeDeploy), I chose [GitLab\nCI/CD](/topics/ci-cd/) to standardize these projects. The biggest reason for\nthis choice is history. As a project evolves, its CI/CD configuration may\nchange. If you ever need to go back in time, you may have difficulty\ndeploying again. Since GitLab CI/CD is based on a `.gitlab-ci.yml` config\nfile that is committed with the code, as long as a commit built and deployed\nthen, it stands a pretty good chance of building and deploying now. 
Being\nable to tweak CI/CD without leaving my editor was an additional bonus.\n\n\n### Crash course in GitLab CI/CD\n\n\nGitLab CI/CD relies on having a `.gitlab-ci.yml` file in the root of your\nrepo. CI/CD for each commit is run against the `.gitlab-ci.yml` that is\ncurrent for that commit. The fundamental unit of CI/CD for GitLab is a\n“job”. A job is a construct that runs a bash script against a commit in a\nparticular context. You might have one job to run tests, other jobs to build\nfor staging or production, and other jobs to deploy to particular\nenvironments. In the config file, jobs are represented by top level maps\n(aka “objects”) that are not otherwise “reserved” GitLab CI/CD maps.\nExamples of reserved top level maps: `image` (Docker image in which your\njobs run), `services` (other Docker images that need to run while your jobs\nrun), `before_script` (runs before every `script`), `after_script` (runs\nafter every `script`), `stages` (redefines the stage names and order),\n`variables` (variables available to all jobs), and `cache` (controls what is\ncached between CI/CD runs; good for stuff from your package manager).\n\n\nEvery job must belong to a stage (if left out, `test` is the default).\nStages are run in a sequence, and all of the jobs in a stage run with max\nparallelism available. The default stage sequence is: `build`, `test`,\n`deploy`. Each job also has `before_script`, `after_script`, `variables`,\nand `cache`. Defining these at a job level will override the top-level\nconfiguration. The most important of these is `variables`, because your\nvariables are what make the production deploy job’s context different from\nthe staging deploy job’s context. `variables` is just a map with a bunch of\nkey value pairs. Variables are consumed with a syntax similar to bash:\n`${myVar}`. 
There are some limitations that you should know:\n\n\n* Variables do not support bash variable expansions, substitutions,\ndefaults, etc.\n\n* Variables do not recurse or have a sense of order of evaluation, but top\nlevel variables can be used in job level variables. See the following\nexamples:\n\n\n```\n\n# You CANNOT do this (referencing a sibling variable in the same map)\n\nvariables:\n    PROD_STAGE_NAME: prod\n    PROD_URL: https://thisismywebsite.com/${PROD_STAGE_NAME}\n```\n\n\n```\n\n# You CAN do this (referencing a top-level variable from a job's variables\nmap)\n\nvariables:\n    PROD_STAGE_NAME: prod\n\nmy_job:\n    variables:\n        STAGE_NAME: ${PROD_STAGE_NAME}\n```\n\n\n```\n\n# But you CANNOT do something like this (nested variables)\n\nvariables:\n    CURRENT_STAGE: PROD\n    PROD_STAGE_NAME: prod\n\nmy_job:\n    variables:\n        STAGE_NAME: ${${CURRENT_STAGE}_STAGE_NAME}\n```\n\n\nThat last example gives us a ton of power. We’ll be sure to abuse that as we\ngo.\n\n\nAs mentioned before, jobs run a bash script in a context. So every job must\nhave a `script`. The last big thing that you need is “flow control”. By\ndefault, a job will run on every commit. Using the `only`, `except`, and\n`when` keys allows you to control how jobs are triggered. `only` and\n`except` accept the following options:\n\n\n* Branch names, e.g. `master` or `develop`\n\n* Tag names\n\n* JS style RegExp literals to evaluate against branch/tag names\n\n* These special keywords: `api`, `branches`, `external`, `tags`, `pushes`,\n`schedules`, `triggers`, and `web`\n\n* Using `branches` and `tags` with `only` cause a job to be run for every\nbranch or tag, respectively\n\n* Repo path filters to deal with repo forks\n\n\nOne more important fact: jobs that start with a period character are\ndisabled, e.g.: `.my_disabled_job`\n\n\nThat should be enough to get us started. You can find more [GitLab CI/CD\ndocumentation here](https://docs.gitlab.com/ee/ci/). 
The most useful bit is\nthe `.gitlab-ci.yml` reference found\n[here](https://docs.gitlab.com/ee/ci/yaml/).\n\n\nAs with any new tool, I got to read and re-read the documentation and make\nsome mistakes getting things right. By the time I was knee-deep in this, I\nrealized there was a need to prevent anyone from having to do this again,\nmyself included. The solution requires two things: a well-designed CI/CD\ntemplate and a way to get that template into all of your new repositories.\nLet’s tackle template design next.\n\n\n### Designing a template\n\n\nThis part is hard to talk about in a completely generic manner. Instead,\nlet’s walk through our use case. Looking at our projects past and present, I\ncould usually bet on these characteristics:\n\n\n* Deploys to AWS (we are an AWS consultancy after all…)\n\n* Uses Serverless framework with Node.js or Python\n\n* May deploy production to multiple regions\n\n* May deploy different stages to different accounts\n\n\nIn addition, I realized that I needed these other options:\n\n\n* May need to “disable” dev/staging from doing real work\n\n* May want one dev environment per branch\n\n\nFinally, we decided on the following deployment strategy:\n\n\n* Production deploys via tags on `master`\n\n* Staging deploys on commits/merges to `master`\n\n* Dev deploys should work for all other branches (we’re not going to\nimplement this one in this post)\n\n\nMy roots are as a software developer, so making things reusable is a core\nskill at this point. A good template is going to make it super easy for the\nintended cases and be fairly adaptable for other uses. Here is the goal:\n\n\n* One script per stage. That means only one test script, one build script,\nand one deploy script. Oh, and keep it DRY.\n\n* Jobs should be as similar as possible, and differences should be tweaked\nby top level variables.\n\n\nLet’s focus on that single script per stage. 
We’re not going to cover how to\nwrite the deployment script, but we’ll focus on the deploy stage. But let’s\nsay we start with a deployment job like this:\n\n\n```\n\ndeploy:production:\n    stage: deploy\n    script: |\n        # assume ${DEPLOYMENT_ROLE} in AWS\n        # install dependencies\n        # run serverless deployment with ${STAGE_NAME} ${REGION}\n    variables:\n        DEPLOYMENT_ROLE: arn:aws:iam::1234567890:role/gitlab-ci-deployment\n        STAGE_NAME: prod\n        REGION: us-east-1\n        ACCOUNT: \"1234567890\"\n    only:\n        - tags\n```\n\n\n\nNow we could copy and tweak this for staging and dev, but that’s not what\nwe’re after. First, let’s break the script off to a reusable chunk and use\nit in our staging deploy:\n\n\n```\n\n.deployment_script: &deployment_script\n    stage: deploy\n    script: |\n        # assume ${DEPLOYMENT_ROLE} in AWS\n        # install dependencies\n        # run serverless deployment with ${STAGE_NAME} ${REGION}\n\ndeploy:production:\n    \u003C\u003C: *deployment_script\n    variables:\n        DEPLOYMENT_ROLE: arn:aws:iam::1234567890:role/gitlab-ci-deployment\n        STAGE_NAME: prod\n        REGION: us-east-1\n        ACCOUNT: \"1234567890\"\n        PRODUCTION: \"true\"\n    only:\n        - tags\n\ndeploy:staging:\n    \u003C\u003C: *deployment_script\n    variables:\n        DEPLOYMENT_ROLE: arn:aws:iam::0987654321:role/gitlab-ci-deployment\n        STAGE_NAME: staging\n        REGION: us-east-1\n        ACCOUNT: \"0987654321\"\n    only:\n        - master\n```\n\n\nUsing YAML anchors and references, we can inject the script into all of our\ndeployment jobs. Notice that the deployment script is disabled. This is\nbecause we don’t want it to run in parallel with all of our intended jobs.\nWe also added a `PRODUCTION` environment variable to just the production\ndeploy to allow our script to pick that up too. If your code knows about\nthis, you can use this to turn on/off production-only features. 
Now, we can\nmake this cleaner and easier for our developers by pulling all of the\n`variables` to a top-level variables map at the top of the file:\n\n\n```\n\nvariables:\n    PROD_ACCOUNT: \"1234567890\"\n    PROD_STAGE_NAME: prod\n    PROD_REGION: us-east-1\n    STAGING_ACCOUNT: \"0987654321\"\n    STAGING_STAGE_NAME: staging\n    STAGING_REGION: us-east-1\n\n.deployment_script: &deployment_script\n    stage: deploy\n    script: |\n        # assume ${DEPLOYMENT_ROLE} in AWS\n        # install dependencies\n        # run serverless deployment with ${STAGE_NAME}, ${REGION}, and ${ACCOUNT}\n\ndeploy:production:\n    \u003C\u003C: *deployment_script\n    variables:\n        DEPLOYMENT_ROLE: \"arn:aws:iam::${PROD_ACCOUNT}:role/gitlab-ci-deployment\"\n        STAGE_NAME: ${PROD_STAGE_NAME}\n        REGION: ${PROD_REGION}\n        ACCOUNT: ${PROD_ACCOUNT}\n        PRODUCTION: \"true\"        \n    only:\n        - tags\n\ndeploy:staging:\n    \u003C\u003C: *deployment_script\n    variables:\n        DEPLOYMENT_ROLE: \"arn:aws:iam::${STAGING_ACCOUNT}:role/gitlab-ci-deployment\"\n        STAGE_NAME: ${STAGING_STAGE_NAME}\n        REGION: ${STAGING_REGION}\n        ACCOUNT: ${STAGING_ACCOUNT}\n    only:\n        - master\n```\n\n\n\nNow, that’s looking more reusable, and we have accomplished our second goal\nof making the jobs very similar and controlled by top-level variables. This\nmakes it easy for anyone who fits the template’s use case perfectly to reuse\nit. 
We could easily add the dev environment, but we’ll skip that in favor of\nillustrating multi-region production deploys:\n\n\n```\n\nvariables:\n    PROD_ACCOUNT: \"1234567890\"\n    PROD_STAGE_NAME: prod\n    PROD1_REGION: us-east-1\n    PROD2_REGION: us-west-2\n    STAGING_ACCOUNT: \"0987654321\"\n    STAGING_STAGE_NAME: staging\n    STAGING_REGION: us-east-1\n\n.deployment_script: &deployment_script\n    stage: deploy\n    script: |\n        # assume ${DEPLOYMENT_ROLE} in AWS\n        # install dependencies\n        # run serverless deployment with ${STAGE_NAME}, ${REGION}, and ${ACCOUNT}\n\n.production_variables\n    DEPLOYMENT_ROLE: \"arn:aws:iam::${PROD_ACCOUNT}:role/gitlab-ci-deployment\"\n    STAGE_NAME: ${PROD_STAGE_NAME}\n    ACCOUNT: ${PROD_ACCOUNT}\n    PRODUCTION: \"true\"    \n\ndeploy:production_1: &deploy_production\n    \u003C\u003C: *deployment_script\n    variables:\n        \u003C\u003C: *production_variables\n        REGION: ${PROD1_REGION}\n    only:\n        - tags\n\ndeploy:production_2:\n    \u003C\u003C: *deploy_production\n    variables:\n        \u003C\u003C: *production_variables\n        REGION: ${PROD2_REGION}        \n\ndeploy:staging:\n    \u003C\u003C: *deployment_script\n    variables:\n        DEPLOYMENT_ROLE: \"arn:aws:iam::${STAGING_ACCOUNT}:role/gitlab-ci-deployment\"\n        STAGE_NAME: ${STAGING_STAGE_NAME}\n        REGION: ${STAGING_REGION}\n        ACCOUNT: ${STAGING_ACCOUNT}\n    only:\n        - master\n```\n\n\nNotice that we have changed the job names to reflect having multiple\nregions. In addition, we are making use of YAML anchors and references to\ncopy the entire `deploy:production_1` job into `deploy:production_2` and\nthen we just override the `REGION` variable. This makes adding additional\nregions super easy.\n\n\nWhat’s more useful at this point is that, as long as you have made your\nscript flexible enough, you can now distribute this to your development team\nas a template. 
If their project fits the script and configuration perfectly,\nthey should just have to fill in the correct values for the top-level\nvariables and go. For those needing something different, they should\nhopefully be able to just tweak the script. Now, we just need to solve the\nproblem of making sure that they actually use the template…\n\n\n### Automatic CI/CD injection with GitLab and AWS Lambda\n\n\nI was inspired by GitHub’s option to select a .gitignore and license during\nthe repo creation process. What if we could have that for CI? Forking GitLab\nand figuring out how to hack this in did not sound like a quick or easy\nthing to do. However, after a little research, I found that we could use a\nsystem hook to trigger a Lambda that could inject the desired template via\nthe commit API. This part is not as interesting to read about, so we did one\nbetter: we have open sourced this tool so you can deploy it in your\nenvironment. Check out the repo\n[here](https://github.com/trek10inc/gitlab-boilerplate-injector). And if\nyou’re looking for someone to help you implement these and other awesome\nautomations and AWS solutions, we would love to talk to you. Feel free to\nreach out to us at info@trek10.com for more. Thanks for reading!\n\n\n## About the Guest Author\n\n\nRob has spent his career honing his interpersonal, technical, and problem\nsolving skills. He spent five years in customer service and management,\nfollowed by over five years in software development and consulting. He has\nexperience working and consulting for everything from startups to Fortune\n500 enterprises in a variety of industries including manufacturing,\nhealthcare, and finance. 
Rob has earned a MS in Applied Mathematics and\nComputer Science from Indiana University and a BS in Pharmaceutical Sciences\nfrom Purdue University.\n",[109,9],{"slug":1433,"featured":6,"template":700},"making-ci-easier-with-gitlab","content:en-us:blog:making-ci-easier-with-gitlab.yml","Making Ci Easier With Gitlab","en-us/blog/making-ci-easier-with-gitlab.yml","en-us/blog/making-ci-easier-with-gitlab",{"_path":1439,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1440,"content":1446,"config":1452,"_id":1454,"_type":14,"title":1455,"_source":16,"_file":1456,"_stem":1457,"_extension":19},"/en-us/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say",{"title":1441,"description":1442,"ogTitle":1441,"ogDescription":1442,"noIndex":6,"ogImage":1443,"ogUrl":1444,"ogSiteName":686,"ogType":687,"canonicalUrls":1444,"schema":1445},"Making the case for a DevOps platform: What data and customers say","Don't just take our word for why a DevOps platform means better DevOps and faster, safer releases: here's what the latest data shows and how customers have benefitted.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Making the case for a DevOps platform: What data and customers say\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2021-09-08\",\n      }",{"title":1441,"description":1442,"authors":1447,"heroImage":1443,"date":1448,"body":1449,"category":1143,"tags":1450},[935],"2021-09-08","\n_Our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/) is out now! 
Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\nIn the struggle to release safer software faster, development teams are increasingly choosing a DevOps platform to help them get there. In our [2021 Global DevSecOps Survey](/developer-survey/) we asked respondents what their DevOps practices included and a \"DevOps platform\" was among the top four choices, right next to CI/CD, test automation, and DevSecOps.\n\nWe're of course bullish on the idea of a DevOps platform, but we're far from alone. Here's a fresh look at how the data – and the customers – support the optimistic trajectory of a DevOps platform.\n\n## DevOps is hot\n\nThe DevOps market was worth $6 billion in 2020, according to Global Industry Analysts, and five-year growth forecasts range from $17 billion to as much as $23 billion, depending on the firm. \n\n**[Watch a [deep dive into GitLab's DevOps Platform](https://www.youtube.com/watch?v=wChaqniv3HI)]**\n\nThis probably doesn't need saying, but one reason the market is so strong is that DevOps works. In late 2020, Forrester Research conducted \"The State of Modern Technology Operations Q4 2020,\" and concluded [\"the DevOps hypothesis is sound\"](https://go.forrester.com/blogs/the-devops-hypothesis-is-sound-introducing-the-2020-state-of-modern-technology-operations-survey/). The report went further to say that companies successfully working in a DevOps/Agile model were able to release faster and thus have higher revenue growth. \n\n## A DevOps platform is the logical next step\n\nBut in order to do DevOps a team needs tools, and too many tools results in a toolchain, which is where things can get very messy quickly. Time consuming handoffs, integrations and maintenance lead to what Forrester calls the \"DevOps tax\" of roughly 10%, meaning teams have to spend that much of their time each month just trying to keep the toolchains running. 
(In [our 2021 Survey](/developer-survey/), the tax was even higher: 20% of survey takers said they spend between 11% and 20% of their time just on toolchain maintenance and integration).\n\n**[Use a DevOps platform to [avoid the DevOps tax](/topics/devops/use-devops-platform-to-avoid-devops-tax/)]**\n\nA DevOps platform with end-to-end visibility and everything in one place eliminates the tax and boosts DevOps performance. Nearly 12% of survey respondents told us that adding a DevOps platform has allowed them to release software faster. Overall, our survey takers said the use of a DevOps platform resulted in better DevOps, improved collaboration, easier automation and more comprehensive visibility/traceability. \n\nOne developer put it succinctly: \"[Using a DevOps platform] means reduced mean time to recovery (MTTR), quicker time to market, reduced lead time for fixes, and fewer change failures.\"\n\nAnd if all of that wasn't enough, a single DevOps platform gives *everyone* in the company the ability to see and participate in the process. In fact, 23% of our survey takers said everyone in their company – not just Dev and Ops – actually uses the DevOps platform. \n\n## DevOps platforms in the real world\n\nHow do teams really take advantage of a DevOps platform?\n\n[BI Worldwide](/customers/bi-worldwide/), a global engagement agency, found the ability to tie all the processes together made a difference. \"One tool for SCM+CI/CD was a big initial win,\" says Adam Dehnel, product architect at BI. \"Now wrapping security scans into that tool as well has already increased our visibility into security vulnerabilities. The integrated Docker registry has also been very helpful for us. 
Issue/Product management features let everyone operate in the same space regardless of role.\"\n\n**[How to [get the most out of your DevOps platform](/topics/devops/seven-tips-to-get-the-most-out-of-your-devops-platform/)]**\n\nLess turned out to be more at [Glympse](/customers/glympse/), a geo-location sharing service provider that consolidated close to 20 different tools into GitLab. \"Development can move much faster when engineers can stay on one page and click buttons to release auditable changes to production and have easy rollbacks; everything is much more streamlined,\" explains Zaq Wiedmann, lead software engineer at Glympse. \"Within one sprint, just 2 weeks, Glympse was able to implement security jobs across all of their repositories using GitLab's CI templates and their pre-existing Docker-based deployment scripts.\"\n\nWant a more detailed look at the role a DevOps platform can play in your organization? Explore our [comprehensive guide to DevOps platforms](/topics/devops-platform/).\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\n## Read more about the DevOps Platform:\n\n- [The journey to a DevOps Platform](/blog/the-journey-to-a-devops-platform/)\n\n- [How ten steps over ten years led to the DevOps Platform](/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform/)\n\n- [Agile planning with a DevOps platform](/blog/agile-planning-with-a-devops-platform/)\n\n- [Welcome to the DevOps Platform era](/blog/welcome-to-the-devops-platform-era/)\n\n- [It's time to build more accessible software. 
A DevOps platform can help](/blog/how-the-devops-platform-makes-building-accessible-software-easier/)\n",[830,1451,9],"developer survey",{"slug":1453,"featured":6,"template":700},"making-the-case-for-a-devops-platform-what-data-and-customers-say","content:en-us:blog:making-the-case-for-a-devops-platform-what-data-and-customers-say.yml","Making The Case For A Devops Platform What Data And Customers Say","en-us/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say.yml","en-us/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say",{"_path":1459,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1460,"content":1466,"config":1472,"_id":1474,"_type":14,"title":1475,"_source":16,"_file":1476,"_stem":1477,"_extension":19},"/en-us/blog/monkton-moves-to-gitlab-customer-story",{"title":1461,"description":1462,"ogTitle":1461,"ogDescription":1462,"noIndex":6,"ogImage":1463,"ogUrl":1464,"ogSiteName":686,"ogType":687,"canonicalUrls":1464,"schema":1465},"Monkton's journey to GitLab: Focusing on automation","Monkton is migrating from a suite of disparate tools to GitLab, enabling them to better help their customers build safe, secure mobile apps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670123/Blog/Hero%20Images/moving-to-gitlab-cover.png","https://about.gitlab.com/blog/monkton-moves-to-gitlab-customer-story","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Monkton's #movingtogitlab story: Going all in on automation and repeatability\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"},{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2019-05-21\",\n      }",{"title":1467,"description":1462,"authors":1468,"heroImage":1463,"date":1469,"body":1470,"category":804,"tags":1471},"Monkton's #movingtogitlab story: Going all in on automation and repeatability",[825,1368],"2019-05-21","\n\nEven 
with all the [#movingtogitlab](/blog/movingtogitlab/) excitement last year, it never gets old to hear about folks migrating to us.\nSo when Harold Smith, CEO and co-founder of [Monkton Incorporated](https://monkton.io/) – a company dedicated to helping enterprises build safe, secure, and compliant mobile solutions – wrote about [moving Monkton to GitLab](https://medium.com/@h3smith/migration-to-gitlab-dde59fc98315) earlier this year, we asked him to sit down with us to talk about the whys and hows.\n\n## From hodge podge of tools, to consolidated lifecycle\n\n\"We’ve been using some of your competitors’ tools.\nIt sort of became a hodge podge of tools – they’re still good tools, but there are different tools to do different things in the development life cycle. We had known GitLab had existed for a while.\nAnd I think, like many others who know about GitLab, it was an assumption on our end that it's just a source control repository.\nThen we started to realize and peel back a little bit of everything GitLab does – the continuous integration, integrations with other services, the whole pipeline.\nWe really started to focus on it and say, 'This is something we should spend time looking into and investing in.'\n\n\"It turned out to be a really good investment of time – we’ve seen time savings just in our ability to watch projects, our onboarding.\nIt’s cutting out a lot of the managing of all these different tools and different servers.\nIt’s just one thing to go in and manage that does most of the work we need.\nIt's also a huge advantage for us and our customers operating under the constraints of a higher-security environment, that we're able to do continuous integration and development, secure DevOps, in a secure environment that passes their auditing needs.\n\n>It’s cutting out a lot of the managing of all these different tools and different servers.\nIt’s just one thing to go in and manage that does most of the work we need.\n\n\"A lot of tools we were using, 
like some of the other continuous integration tools, are all open source software, which is great.\nBut that comes with some responsibilities: you need to really dig to figure out how to manage it correctly, how to set things up.\nSo, that was probably the biggest disadvantage of working with a collection of open source tools that didn’t have the proper documentation that we needed to move forward.\nSo, once we started looking at GitLab, it really enabled us to consolidate those things.\nAll the documentation is one place. The services that were available …\nIt was really easy to figure out what we needed to do.\nAnd your support has been a big help as well in enabling us to rapidly deliver and stand up these environments.\n\n\"Before, some of our processes were manual, like uploading code scans to Fortify.\nWe’ve automated all of that now on specific branches of the software that we’re building.\nSo, it’s taken out those manual processes that had to go through the checks.\nWhen we build a mobile application and push it through the pipeline, we’re working on how can we automatically publish that to MDM.\nSo, as soon as that code is checked in, scanned, what’s the process to get that into production?\nAnd that’s where we’ve focused a lot of effort of just entirely automation.\"\n\n## Automate all.the.things.\n\n\"Our collective vision within Monkton, and working with you at GitLab, and all these other companies, is how do we automate and take out human error from the equation?\nOur goal is that the moment code is checked in and has been reviewed, the testing lifecycle, the deployment lifecycle, the security vulnerability scanning lifecycle, should all be automated.\nSo, it’s more of humans reviewing reports at the end versus humans having to do the inspections themselves. We really envisioned that these tools could do a much better job than humans can.\n\n\"We’re not trying to replace human jobs. 
But how can we free people up to do what people do best, versus laborious efforts like pen testing mobile applications or pen testing web applications?\nA lot of that can be automated through scripting tools – Amazon Device Farm – all of which GitLab can automate and push out.\nSo, we’re focusing on what tools can we bring in to automate that process, tie them into GitLab, and automate everything. Or virtually everything.\"\n\n## Repeatability is key\n\n\"Repeatability is probably from our vantage point, one of the cornerstones of what we have to be able to do.\nIf we have a Department of Defense customer that builds a hundred mobile apps using our software, and they discover a vulnerability in one of them – if there’s not a repeatable process to build and deliver the solutions, it would take a year to update those hundred mobile apps if they’re doing it in a very siloed environment.\nBut with a repeatable process, they could change it out once and propagate it out, they can patch and push everything within an hour.\nA repeatable process allows you to have repeatable, consistent outcomes every single time, so you know that you can trust the process as part of your security program versus maybe a hodge podge of different tools and manual processes.\"\n\n## Lessons from the migration process\n\n\"It’s been a learning opportunity for us to see what are the best practices that we can collectively share – even with you at GitLab, there might be things that we’re all collectively learning, that we can use to help the community together.\nBecause this isn’t just a proprietary company effort on our end and your end, or even our customers’ end.\nI look at it as a good learning experience for all of us to improve processes, security, compliance, and everything that goes along with that.\"\n\nHere's a bit more from our chat with Harold:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/kT5qZ8W7yXM\" 
frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nVideo produced by [Aricka Flowers](/company/team/#arickaflowers)\n{: .note}\n",[269,233,697,9],{"slug":1473,"featured":6,"template":700},"monkton-moves-to-gitlab-customer-story","content:en-us:blog:monkton-moves-to-gitlab-customer-story.yml","Monkton Moves To Gitlab Customer Story","en-us/blog/monkton-moves-to-gitlab-customer-story.yml","en-us/blog/monkton-moves-to-gitlab-customer-story",{"_path":1479,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1480,"content":1486,"config":1493,"_id":1495,"_type":14,"title":1496,"_source":16,"_file":1497,"_stem":1498,"_extension":19},"/en-us/blog/multi-account-aws-sam-deployments-with-gitlab-ci",{"title":1481,"description":1482,"ogTitle":1481,"ogDescription":1482,"noIndex":6,"ogImage":1483,"ogUrl":1484,"ogSiteName":686,"ogType":687,"canonicalUrls":1484,"schema":1485},"How to set up multi-account AWS SAM deployments with GitLab CI/CD","Our guest author, an AWS Serverless hero, shares how to automate SAM deployments using GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666959/Blog/Hero%20Images/gitlab-aws-cover.png","https://about.gitlab.com/blog/multi-account-aws-sam-deployments-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to set up multi-account AWS SAM deployments with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Forrest Brazeal\"}],\n        \"datePublished\": \"2019-02-04\",\n      }",{"title":1481,"description":1482,"authors":1487,"heroImage":1483,"date":1489,"body":1490,"category":741,"tags":1491},[1488],"Forrest Brazeal","2019-02-04","I've been working with [serverless](/topics/serverless/) applications in AWS\nfor about three years – that makes me an old salt in serverless terms! 
So I\nknow that deploying and maintaining a serverless app can be tricky; the\ntooling often has critical gaps.\n\n\nAWS's [SAM (Serverless Application\nModel)](https://aws.amazon.com/serverless/sam/) is an open source framework\nthat makes it easier to define AWS resources – such as Lambda functions, API\nGateway APIs and DynamoDB tables – commonly used in serverless applications.\nOnce you lay out your app in a SAM template, the next thing you need is a\nconsistent, repeatable way to get that template off your laptop and deployed\nin the cloud.\n\n\nYou need CI/CD.\n\n\nI've used several different [CI/CD systems](/topics/ci-cd/) to automate SAM\ndeployments, and I always look for the following features:\n\n\n- A single deployment pipeline that can build once and securely deploy to\nmultiple AWS accounts (dev, staging, prod).\n\n- Dynamic feature branch deployments, so serverless devs can collaborate in\nthe cloud without stepping on each other.\n\n- Automated cleanup of feature deployments.\n\n- Review of our SAM application directly integrated with the CI/CD tool's\nuser interface.\n\n- Manual confirmation before code is released into production.\n\n\nIn this post, we'll find out how [GitLab\nCI](/solutions/continuous-integration/) can check these boxes on its way to\ndelivering effective CI/CD for AWS SAM. You can follow along using [the\nofficial example code, available\nhere](https://gitlab.com/gitlab-examples/aws-sam).\n\n\n## Multi-account AWS deployments\n\n\nWe'll want to set up our deployment pipeline across multiple AWS accounts,\nbecause accounts are the only true security boundary in AWS. We don't want\nto run any risk of deploying prod data in dev, or vice versa. Our\nmulti-account setup will look something like this:\n\n\nAny time we work with multiple AWS accounts, we need cross-account [IAM\nroles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) in\norder to authorize deployments. 
We'll handle this task through the following\nsteps. (All referenced scripts are available in the [example\nrepo](https://gitlab.com/gitlab-examples/aws-sam))\n\n\n### 1\\. Establish three AWS accounts for development, staging, and\nproduction deployments\n\n\nYou can use existing AWS accounts if you have them, or [provision new ones\nunder an AWS\nOrganization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html).\n\n\n### 2\\. Set up GitLab IAM roles in each account\n\n\nRun the following AWS CLI call with admin credentials in each of the three\naccounts:\n\n\n```\n\naws cloudformation deploy --stack-name GitLabCIRoles --template-file\nsetup-templates/roles.yml --capabilities CAPABILITY_NAMED_IAM\n--parameter-overrides CIAccountID=\"\u003CAWS Account ID where your GitLab CI/CD\nrunner lives>\" CIAccountSTSCondition=\"\u003CThe aws:userid for the IAM principal\nused by the Gitlab runner>\"\n  ```\n\nReplace `CIAccountID` and `CIAccountSTSCondition` as indicated with values\nfrom the AWS account where your GitLab CI/CD runner exists. (Need help\nfinding the `aws:userid` for your runner’s IAM principal? Check out [this\nguide](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable).)\n\n\nThis CloudFormation template defines two roles: `SharedServiceRole` and\n`SharedDeploymentRole`. The `SharedServiceRole` is assumed by the GitLab\nCI/CD runner when calling the AWS CloudFormation service. This role trusts\nthe GitLab CI/CD runner's role. It has permissions to call the\nCloudFormation service, pass a role via IAM, and access S3 and CloudFront:\nnothing else. This role is not privileged enough to do arbitrary AWS\ndeployments on its own.\n\n\nThe `SharedDeploymentRole`, on the other hand, has full administrative\naccess to perform any AWS action. A such, it cannot be assumed directly by\nthe GitLab CI/CD runner. 
Instead, this role must be \"passed\" to\nCloudFormation using the service's `RoleArn` parameter. The CloudFormation\nservice trusts the `SharedDeploymentRole` and can use it to deploy whatever\nresources are needed as part of the pipeline.\n\n\n### 3\\. Create an S3 bucket for CI artifacts\n\n\nGrab the AWS account ID for each of your development, staging, and\nproduction accounts, then deploy this CloudFormation template **in the\naccount where your GitLab CI/CD Runner exists**:\n\n\n`aws cloudformation deploy --stack-name GitLabCIBucket --template-file\nsetup-templates/ci-bucket.yml --parameter-overrides DevAwsAccountId=\"\u003CAWS\nAccount ID for dev>\" StagingAwsAccountId=\"\u003CAWS Account ID for staging>\"\nProdAwsAccountId=\"\u003CAWS Account ID for prod>\" ArtifactBucketName=\"\u003CA unique\nname for your bucket>\"`\n\n\nThis CloudFormation template creates a centralized S3 bucket which holds the\nartifacts created during your pipeline run. Artifacts are created once for\neach branch push and reused between staging and production. The bucket\npolicy allows the development, test, and production accounts to reference\nthe same artifacts when deploying CloudFormation stacks -- checking off our\n\"build once, deploy many\" requirement.\n\n\n### 4\\. Assume the `SharedServiceRole` before making any cross-account AWS\ncalls\n\nWe have provided the script `assume-role.sh`, which will assume the provided\nrole and export temporary AWS credentials to the current shell. It is\nsourced in the various `.gitlab-ci.yml` build scripts.\n\n\n## Single deployment pipeline\n\n\nThat brings us to the `.gitlab-ci.yml` file you can see at the root of our\nexample repository. GitLab CI/CD is smart enough to dynamically create and\nexecute the pipeline based on that template when we push code to GitLab. 
The\nfile has a number of variables at the top that you can tweak based on your\nenvironment specifics.\n\n\n### Stages\n\n\nOur Gitlab CI/CD pipeline contains seven possible stages, defined as\nfollows:\n\n\n![Multi-account AWS SAM deployment model with GitLab\nCI](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/deployment-model.png){:\n.shadow.medium.center}\n\n\n```yaml\n\nstages:\n - test\n - build-dev\n - deploy-dev\n - build-staging\n - deploy-staging\n - create-change-prod\n - execute-change-prod\n```\n\n\n![Deployment lifecycle\nstages](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/deployment-lifecycle-stages.png){:\n.shadow.medium.center}\n\n\n\"Stages\" are used as a control flow mechanism when building the pipeline.\nMultiple build jobs within a stage will run in parallel, but all jobs in a\ngiven stage must complete before any jobs belonging to the next stage in the\nlist can be executed.\n\n\nAlthough seven stages are defined here, only certain ones will execute,\ndepending on what kind of Git action triggered our pipeline. We effectively\nhave three stages to any deployment: a \"test\" phase where we run unit tests\nand dependency scans against our code, a \"build\" phase that packages our SAM\ntemplate, and a \"deploy\" phase split into two parts: creating a\n[CloudFormation change\nset](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)\nand then executing that change set in the target environment.\n\n\n#### Test\n\n\nOur `.gitlab-ci.yml` file currently runs two types of tests: unit tests\nagainst our code, and dependency scans against our third-party Python\npackages.\n\n\n##### Unit tests\n\n\nUnit tests run on every branch pushed to the remote repository. 
This\nbehavior is defined by the `only: branches` property in the job shown below:\n\n\n```yaml\n\ntest:unit:\n stage: test\n only:\n   - branches\n script: |\n   if test -f requirements.txt; then\n       pip install -r requirements.txt\n   fi\n   python -m pytest --ignore=functions/\n```\n\n\nEvery GitLab CI/CD job runs a script. Here, we install any dependencies,\nthen execute Python unit tests.\n\n\n##### Dependency scans\n\n\n[Dependency\nscans](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/),\nwhich can take a few minutes, run only on code pushed to the master branch;\nit would be counterproductive for developers to wait on them every time they\nwant to test code.\n\n\nThese scans use a hardcoded, standard Docker image to mount the code and run\n\"Docker in Docker\" checks against a database of known package\nvulnerabilities. If a vulnerability is found, the pipeline will log the\nerror without stopping the build (that's what the `allow-failure: true`\nproperty does).\n\n\n#### Build\n\n\nThe build stage turns our SAM template into CloudFormation and turns our\nPython code into a valid AWS Lambda deployment package. For example, here's\nthe `build:dev` job:\n\n\n```yaml\n\nbuild:dev:\n stage: build-dev\n \u003C\u003C: *build_script\n variables:\n   \u003C\u003C: *dev_variables\n artifacts:\n   paths:\n     - deployment.yml\n   expire_in: 1 week\n only:\n   - branches\n except:\n   - master\n```\n\n\nWhat's going on here? Note first the combination of `only` and `except`\nproperties to ensure that our development builds happen only on pushes to\nbranches that aren't `master`. 
We're referring to `dev_variables`, the set\nof development-specific variables defined at the top of `.gitlab-ci.yml`.\nAnd we're running a script, pointed to by `build_script`, which packages our\nSAM template and code for deployment using the `aws cloudformation package`\nCLI call.\n\n\nThe artifact `deployment.yml` is the CloudFormation template output by our\npackage command. It has all the implicit SAM magic expanded into\nCloudFormation resources. By managing it as an artifact, we can pass it\nalong to further steps in the build pipeline, even though it isn't committed\nto our repository.\n\n\n#### Deploy\n\nOur deployments use AWS CloudFormation to deploy the packaged application in\na target AWS environment.\n\n\nIn development and staging environments, we use the `aws cloudformation\ndeploy` command to create a change set and immediately execute it. In\nproduction, we put a manual \"wait\" in the pipeline at this point so you have\nthe opportunity to review the change set before moving onto the \"Execute\"\nstep, which actually calls `aws cloudformation execute-changeset` to update\nthe underlying stack.\n\n\nOur deployment jobs use a helper script, committed to the top level of the\nexample repository, called `cfn-wait.sh`. This script is needed because the\n`aws cloudformation` commands don't wait for results; they report success as\nsoon as the stack operation starts. 
To properly record the deployment\nresults in our job, we need a script that polls the CloudFormation service\nand throws an error if the deployment or update fails.\n\n\n## Dynamic feature branch deployments and Review Apps\n\n\n![Dynamic feature branch deployments and Review\nApps](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/dynamic-feature-branch-deployments.png){:\n.shadow.medium.center}\n\n\nWhen a non-master branch is pushed to GitLab, our pipeline runs tests,\nbuilds the [updated source\ncode](/solutions/source-code-management/), and deploys and/or\nupdates the changed CloudFormation resources in the development AWS account.\nWhen the branch is merged into master, or if someone clicks the \"Stop\"\nbutton next to the branch's environment in GitLab CI, the CloudFormation\nstack will be torn down automatically.\n\n\nIt is perfectly possible, and indeed desirable, to have multiple development\nfeature branches simultaneously deployed as live environments for more\nefficient parallel feature development and QA. The serverless model makes\nthis a cost-effective strategy for collaborating in the cloud.\n\n\nIf we are dynamically deploying our application on every branch push, we\nmight like to view it as part of our interaction with the GitLab console\n(such as during a code review). GitLab supports this with a nifty feature\ncalled [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/). 
Review\nApps allow you to specify an \"environment\" as part of a deployment job, as\nseen in our `deploy:dev` job below:\n\n\n```yaml\n\ndeploy:dev:\n \u003C\u003C: *deploy_script\n stage: deploy-dev\n dependencies:\n   - build:dev\n variables:\n   \u003C\u003C: *dev_variables\n environment:\n   name: review/$CI_COMMIT_REF_NAME\n   url: https://${CI_COMMIT_REF_NAME}.${DEV_HOSTED_ZONE_NAME}/services\n   on_stop: stop:dev\n only:\n   - branches\n except:\n   - master\n```\n\n\nThe link specified in the `url` field of the `environment` property will be\naccessible in the `Environments` section of GitLab CI/CD or on any merge\nrequest of the associated branch. (In the case of the sample SAM application\nprovided with our example, since we don't have a front end to view, the link\njust takes you to a GET request for the `/services` API endpoint and should\ndisplay some raw JSON in your browser.)\n\n\n![Link to live\nenvironment](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/link-live-environment.png){:\n.shadow.medium.center}\n\n\nThe `on_stop` property specifies what happens when you \"shut down\" the\nenvironment in GitLab CI. This can be done manually or by deleting the\nassociated branch. 
In the case above, we have stopped behavior for dev\nenvironments linked to a separate job called `stop:dev`:\n\n\n```yaml\n\nstop:dev:\n stage: deploy-dev\n variables:\n   GIT_STRATEGY: none\n   \u003C\u003C: *dev_variables\n \u003C\u003C: *shutdown_script\n when: manual\n environment:\n   name: review/$CI_COMMIT_REF_NAME\n   action: stop\n only:\n   - branches\n except:\n   - master\n```\n\n\nThis job launches the `shutdown_script` script, which calls `aws\ncloudformation teardown` to clean up the SAM deployment.\n\n\nFor safety's sake, there is no automated teardown of staging or production\nenvironments.\n\n\n## Production releases\n\n\n![Production\nreleases](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/production-releases.png){:\n.shadow.medium.center}\n\n\nWhen a change is merged into the master branch, the code is built, tested\n(including dependency scans) and deployed to the staging environment. This\nis a separate, stable environment that developers, QA, and others can use to\nverify changes before attempting to deploy in production.\n\n\n![Staging\nenvironment](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/staging-environment.png){:\n.shadow.medium.center}\n\n\nAfter deploying code to the staging environment, the pipeline will create a\nchange set for the production stack, and then pause for a manual\nintervention. A human user must click a button in the Gitlab CI/CD\n\"Environments\" view to execute the final change set.\n\n\n## Now what?\n\n\nStep back and take a deep breath – that was a lot of information! Let's not\nlose sight of what we've done here: we've defined a secure, multi-account\nAWS deployment pipeline in our GitLab repo, integrated tests, builds and\ndeployments, and successfully rolled a SAM-defined serverless app to the\ncloud. Not bad for a few lines of config!\n\n\nThe next step is to try this on your own. 
If you'd like to start with our\nsample \"AWS News\" application, you can simply run `sam init --location\ngit+https://gitlab.com/gitlab-examples/aws-sam` to download the project on\nyour local machine. The AWS News app contains a stripped-down,\nsingle-account version of the `gitlab-ci.yml` file discussed in this post,\nso you can try out deployments with minimal setup needed.\n\n\n## Further reading\n\n\nWe have barely scratched the surface of GitLab CI/CD and AWS SAM in this\npost. Here are some interesting readings if you would like to take your work\nto the next level:\n\n\n### SAM\n\n\n- [Implementing safe AWS Lambda deployments with AWS SAM and\nCodeDeploy](https://aws.amazon.com/blogs/compute/implementing-safe-aws-lambda-deployments-with-aws-codedeploy/)\n\n- [Running and debugging serverless applications locally using the AWS SAM\nCLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-test-and-debug.html)\n\n\n### GitLab CI\n\n\n- [Setting up a GitLab Runner on\nEC2](https://hackernoon.com/configuring-gitlab-ci-on-aws-ec2-using-docker-7c359d513a46)\n\n- [Scheduled\npipelines](https://docs.gitlab.com/ee/ci/pipelines/schedules.html)\n\n- [ChatOps](https://docs.gitlab.com/ee/ci/chatops/)\n\n\nPlease [let me know](https://twitter.com/forrestbrazeal) if you have further\nquestions!\n\n\n### About the guest author\n\n\nForrest Brazeal is an [AWS Serverless\nHero](https://aws.amazon.com/developer/community/heroes/forrest-brazeal/).\nHe currently works as a senior cloud architect at\n[Trek10](https://trek10.com), an AWS Advanced Consulting Partner. 
You can\n[read more about Trek10's GitLab journey here](/customers/trek10/).\n",[109,1492,233,697,1123,9],"demo",{"slug":1494,"featured":6,"template":700},"multi-account-aws-sam-deployments-with-gitlab-ci","content:en-us:blog:multi-account-aws-sam-deployments-with-gitlab-ci.yml","Multi Account Aws Sam Deployments With Gitlab Ci","en-us/blog/multi-account-aws-sam-deployments-with-gitlab-ci.yml","en-us/blog/multi-account-aws-sam-deployments-with-gitlab-ci",{"_path":1500,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1501,"content":1507,"config":1515,"_id":1517,"_type":14,"title":1518,"_source":16,"_file":1519,"_stem":1520,"_extension":19},"/en-us/blog/open-source-nasa-gl",{"title":1502,"description":1503,"ogTitle":1502,"ogDescription":1503,"noIndex":6,"ogImage":1504,"ogUrl":1505,"ogSiteName":686,"ogType":687,"canonicalUrls":1505,"schema":1506},"MRI Technologies used GitLab for unified toolchains to NASA","Live from GitLab Commit: NASA will be flying Kubernetes clusters to the moon and GitLab is helping.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678434/Blog/Hero%20Images/nasagitlab.jpg","https://about.gitlab.com/blog/open-source-nasa-gl","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Commit: How MRI Technologies used GitLab to bring unified toolchains to NASA\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-09-17\",\n      }",{"title":1508,"description":1503,"authors":1509,"heroImage":1504,"date":1510,"body":1511,"category":694,"tags":1512},"GitLab Commit: How MRI Technologies used GitLab to bring unified toolchains to NASA",[935],"2019-09-17","\nNASA can put [Rovers on Mars](https://mars.nasa.gov/mer/), but a complex legacy software system proved a bit of a challenge. 
Speaking at GitLab Commit in Brooklyn, [Marshall Cottrell](https://www.linkedin.com/in/marshall-cottrell-27b385181) of [MRI Technologies](https://www.mricompany.com) explained how the company teamed up with NASA to launch the space agency into the era of modern application development using Kubernetes and GitLab.\n\nIn September 2018 MRI began work on a new software development platform called APPDAT. \"It's the only platform taking a totally 'fresh approach' to application development and data science activities within the Agency,\" Marshall said. The team's challenge was to update an Oracle-based legacy SCM solution using open source technologies and APIs. At the time NASA had no toolchains to support CI/CD during development and lots of silos of information. \"There was no mechanism for us to disseminate innovations, best practices, or what we learned,\" Marshall said. NASA needed a unified toolchain and platform for software delivery. \"GitLab was chosen as the platform source control management solution because it is the only product in this space that integrates all stages of the DevSecOps lifecycle.\"\n\n## A laser focus helps\n\nPerhaps not surprisingly MRI had ambitious goals for APPDAT, Marshall explained. The overarching hope was to build an automated DevOps platform that served as the single source of truth. Until MRI got involved, NASA had no way to actually \"own\" the software development process; teams operated in a piecemeal fashion, choosing contractors and solutions based on situational needs rather than looking at the big picture. 
Those decisions left NASA subject to potentially \"abusive behavior,\" Marshall explained.\n\nSo MRI laid out a number of goals:\n\n- Empower teams to fully manage the resources they support\n- Demonstrate and promote fully open project management and collaboration\n- Create a sandbox for protoyping with no barriers to entry\n- Assemble an API and data economy that would eliminate silos and promote reusability\n- Establish platform-level security controls with a goal of \"compliant by fault\"\n\nTo get there, MRI emphasized collaboration and tried to reach out to the \"forward-leaning\" customers and individual civil servant developers, engineers and researchers who were eager to contribute. The team adhered strictly to cloud native, Zero Trust and open source approaches and, in the end, came up with a Kubernetes platform that met the space agency's needs for today and in the future. The technology choices were important, but so was the time spent laying the groundwork for a culture change. \"Many modernization proposals try to meet everyone where they're at,\" Marshall explained. \"A more opinionated approach allows us to provide a succinct and unified toolchain that all parties can contribute to, evolve, and improve over time.\"\n\nToday the 61-year old space agency has a modern platform where developers can easily collaborate with non-developers, no complex tooling is required, and context switching is a thing of the past, Marshall said. APPDAT syncs from the agency's existing SCM solutions so everyone was able to continue to use the same tools.\n\nPerhaps most exciting, NASA's plans to have astronauts established on the moon by 2024 as part of the [Artemis program](https://www.nasa.gov/what-is-artemis). That will include a data center, and Marshall is confident Kubernetes will be part of the launch.\n\n\"We’ve already begun to change minds at NASA and you can do it at your enterprise too,\" Marshall said. 
His last best advice: Play the long game, only innovate when it makes things easier, and a bottom-up approach is an easy way to make friends.\n\nWatch Marshall's entire presentation here:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/RsUw4Ueyn-c\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nDon't miss out on the chance to network with others on the same DevOps journey. Get your tickets to [Commit London on October 9](/events/commit/).\n\nCover image by [David Torres](https://unsplash.com/@djjabbua) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[1513,784,9,697,1514],"GKE","frontend",{"slug":1516,"featured":6,"template":700},"open-source-nasa-gl","content:en-us:blog:open-source-nasa-gl.yml","Open Source Nasa Gl","en-us/blog/open-source-nasa-gl.yml","en-us/blog/open-source-nasa-gl",{"_path":1522,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1523,"content":1529,"config":1535,"_id":1537,"_type":14,"title":1538,"_source":16,"_file":1539,"_stem":1540,"_extension":19},"/en-us/blog/optimize-gitops-workflow",{"title":1524,"description":1525,"ogTitle":1524,"ogDescription":1525,"noIndex":6,"ogImage":1526,"ogUrl":1527,"ogSiteName":686,"ogType":687,"canonicalUrls":1527,"schema":1528},"Optimize GitOps workflow with version control from GitLab","A GitOps workflow improves development, operations and business processes and GitLab’s CI plays a vital role.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673081/Blog/Hero%20Images/gitops-image-unsplash.jpg","https://about.gitlab.com/blog/optimize-gitops-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Optimize GitOps workflow with version control from GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein Matturro\"}],\n        \"datePublished\": \"2019-10-28\",\n      
}",{"title":1524,"description":1525,"authors":1530,"heroImage":1526,"date":1531,"body":1532,"category":694,"tags":1533},[894],"2019-10-28","\nGitOps is a way for IT operations to manage changes across infrastructure and development teams. At GitLab\nConnect in Denver, [Tyler Sparks](https://www.linkedin.com/in/sparksconcept/), principal engineer and\nowner of Sparks Concept, presented a talk on why GitOps is a productive workflow and how\nusing GitLab can increase communication and version control.\n\n[GitOps](/topics/gitops/) uses infrastructure as code but with processes in place on top of it, including extensive use of\nmerge requests for everything from policy to infrastructure changes. “Success for most companies and\nengineering groups is based on the interactions of a large, complex, distributed system,” Tyler says.\nThe goal of GitOps is to incorporate Git beyond development and operations teams, improving the\nbusiness as a whole with the right tool. “It's a really cool way that GitLab integrates and it's a way to\nshift things left in your organization.”\n\n## The Git in GitOps\n\n“Git is the single source of truth. You shouldn’t be able to make any change outside of Git,” Tyler says. This creates one clean transaction between teams. Git establishes a unified location for anything from security, infrastructure changes, deployments, process changes, and even the integration of other tools. “Git is serving as the glue to make these safe transitions so that you can move faster as a team,” Tyler says.\n\nCreating that interaction between groups is often elaborate and difficult to manage. “Anyone building software these days is finding it more and more complex...everything is changing, the landscape is constantly changing,” Tyler says. Services are being run on stacks upon stacks and there is a lot of risk involved in maintenance. 
A tool, like [GitLab CI](/solutions/continuous-integration/), simplifies the processes and grants visibility.\n\n## GitOps best practices\n\nIn a GitOps workflow, where one simple change can impact three different teams, a strong [version control is imperative for communication](/topics/version-control/). Between disparate tools and poorly defined handoffs, the solution is to move into one repository for all tools and teams. With one overarching repository, “You can have a bunch of parallel workstreams running safely… you will have minimum viable change and a way to observe it,” Tyler says.\n\nWith GitLab’s version control system in place, teams can see what’s going on to work together and to know what change is going to impact where. “GitLab CI is one of the original products that made it possible to start to take an integrative view of the system,” Tyler says. “This is the penultimate way to [promote collaboration](/topics/gitops/gitops-gitlab-collaboration/) and to break down silos within an organization. GitLab is a tool that helps with that.”\n\nGitLab’s version control not only safeguards the infrastructure, but ultimately trickles throughout the entire enterprise. “As companies adopt GitLab, they’re not just more successful with their technology...it really comes down to how they’re functioning as a group,” Tyler says. “GitLab encourages some really good practices around development and how teams interact.”\n\n>“That’s why GitLab is the clear winner...They’re not just leading Gartner and Forrester because they paid somebody off. 
They’re actually an amazing tool.” Tyler Sparks, principal engineer and owner of Sparks Concept\n\nLearn more about GitOps best practices and Tyler’s work with GitLab CI in his presentation below:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/5ykRuaZvY-E\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nCover image by [David Rangel](https://unsplash.com/@rangel) on [Unsplash](https://unsplash.com)\n{: .note}\n",[743,1534,233,9],"performance",{"slug":1536,"featured":6,"template":700},"optimize-gitops-workflow","content:en-us:blog:optimize-gitops-workflow.yml","Optimize Gitops Workflow","en-us/blog/optimize-gitops-workflow.yml","en-us/blog/optimize-gitops-workflow",{"_path":1542,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1543,"content":1549,"config":1555,"_id":1557,"_type":14,"title":1558,"_source":16,"_file":1559,"_stem":1560,"_extension":19},"/en-us/blog/proximus-customer-story-clearcase-to-gitlab",{"title":1544,"description":1545,"ogTitle":1544,"ogDescription":1545,"noIndex":6,"ogImage":1546,"ogUrl":1547,"ogSiteName":686,"ogType":687,"canonicalUrls":1547,"schema":1548},"Proximus shares its #movingtoGitLab story","Moving to GitLab resulted in an 80 percent drop in support tickets and an increase in developer productivity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678603/Blog/Hero%20Images/traffic-at-sunset.jpg","https://about.gitlab.com/blog/proximus-customer-story-clearcase-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Proximus shares its #movingtoGitLab story\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bert Van Eyck\"}],\n        \"datePublished\": \"2019-06-07\",\n      }",{"title":1544,"description":1545,"authors":1550,"heroImage":1546,"date":1552,"body":1553,"category":804,"tags":1554},[1551],"Bert Van 
Eyck","2019-06-07","\n[Proximus](https://www.proximus.com/) is a telecommunication company providing services to residential, enterprise, and public users. We are the leading provider of telephony, internet, television, and network-based ICT services in Belgium, with more than 2 million customers.\n\n## Our road to GitLab\n\nThe technical divisions of Proximus deliver a big part of the applications and systems required for delivering the best possible service to our end users. It includes all types of capabilities such as network construction, network maintenance, product ordering, product selling, billing, etc.\nSome examples of our development include:\n\n- Our website, [Proximus.be](https://www.proximus.be), on which users can find product info, support info and so much more.\n- A mobile app where everyone can check their usage, products, bills, etc.\n- Television interface.\n- A television app.\n\nTo ensure a performant and stable working environment for our developers, we have been working for several years to create a CI/CD DevOps workflow.\n\nThe first complete chain started in 2014 and used tools like ClearCase, Jenkins, Nexus, etc. By 2015 we had about 200 applications which were using our end-to-end chain to build and deploy in all different environments.\n\nIn 2016, to continue to improve our delivery chain, we considered switching ClearCase to Git. Despite ClearCase being a powerful tool, we noticed that the learning curve and the ease of use of ClearCase was not optimal. Also some of the tools we used were starting to lose compatibility.\n\nWe quickly came across GitLab and decided to try our first setup with [GitLab CE](/blog/gitlab-tiers/) in mid-2016.\n\n## The evolution of GitLab inside Proximus\n\nOur first implementation of Gitlab was rapidly a real success and the popularity of GitLab was increasing exponentially within our developer community. 
So, we decided to set up a corporate GitLab CE server at Proximus and to promote the creation of all new applications using our existing CI/CD chain with GitLab as source code management.\nIn just one year of using GitLab, we grew to 325 projects and about 600 users.\n\nBecause GitLab was becoming a big part of our tool set, we switched to GitLab EE in Q2 of 2017. This allowed us to use more features of GitLab such as: LDAP groups, push rules, mirror repositories, etc.\nAnd of course, with the enterprise edition you also receive additional support. With the enterprise edition we also started moving applications from ClearCase to GitLab.\n\nWe were also investigating and testing other features to expand our use of GitLab in the meantime:\n\n- Some projects have started using GitLab CI to build.\n- Integration with Jira has been implemented.\n- Currently experimenting with a first setup of GitLab’s global search function in combination with Elasticsearch.\n\nBy the end of 2018 we had grown to almost 1,000 users and 1,700 projects.\n\n## Challenges\n\nOur biggest challenge was to maintain and ensure a stable environment while growing rapidly. When we started using GitLab CI we encountered some issues with the large number of pipelines and jobs being created, which were consuming a lot of our resources. But [as of GitLab 11.6 a feature has been provided to remove pipelines with their job logs when using API](/releases/2018/12/22/gitlab-11-6-released/#pipelines-can-now-be-deleted-by-project-maintainers-using-api), which helped a lot.\n\n## Results\n\nSince we started using GitLab, we have been able to provide our developers with faster setup and support. Another very noticeable side effect of switching to GitLab was the significant drop in the number of support tickets created by the developers. 
Our first full year of using GitLab inside our CI/CD setup resulted in **80 percent** fewer tickets.\n\nEven in 2018, after our total number of users had grown to almost 1,000, the number of projects had multiplied by five and we migrated 75 applications to GitLab. We still had **65 percent** fewer tickets.\n\nIn the future, we will continue looking into expanding our GitLab environment and we hope to continue the positive evolution together with the support of GitLab.\n",[269,743,233,9,697],{"slug":1556,"featured":6,"template":700},"proximus-customer-story-clearcase-to-gitlab","content:en-us:blog:proximus-customer-story-clearcase-to-gitlab.yml","Proximus Customer Story Clearcase To Gitlab","en-us/blog/proximus-customer-story-clearcase-to-gitlab.yml","en-us/blog/proximus-customer-story-clearcase-to-gitlab",{"_path":1562,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1563,"content":1569,"config":1575,"_id":1577,"_type":14,"title":1578,"_source":16,"_file":1579,"_stem":1580,"_extension":19},"/en-us/blog/python-rust-and-gitlab-ci",{"title":1564,"description":1565,"ogTitle":1564,"ogDescription":1565,"noIndex":6,"ogImage":1566,"ogUrl":1567,"ogSiteName":686,"ogType":687,"canonicalUrls":1567,"schema":1568},"From idea to production with Python, Rust and GitLab CI","GitLab hero Mario Garcia demos the intricate process at GitLab Commit London.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678507/Blog/Hero%20Images/lightbulb.jpg","https://about.gitlab.com/blog/python-rust-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Bringing your application from idea to production using Python, Rust, and GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-11-15\",\n      
}",{"title":1570,"description":1565,"authors":1571,"heroImage":1566,"date":1572,"body":1573,"category":301,"tags":1574},"Bringing your application from idea to production using Python, Rust, and GitLab CI",[780],"2019-11-15","During his talk at GitLab Commit London, GitLab Hero [Mario\nGarcía](https://gitlab.com/mattdark), explains how he troubleshooted his way\nthrough numerous roadblocks to take his Firebase application from\ndevelopment to production using Rust, Python and GitLab CI.\n\n\n## Rewriting from Python to Rust\n\n\n### What is Rust?\n\n\nWhile Python is a household name among developers, Rust is the new kid on\nthe block when it comes to a systems programming language.\n\n\n[Rust](https://www.rust-lang.org/) was developed by Mozilla is giving to the\nworld, it's been in development since 2009 with a first stable version\nreleased in May 2015 and it aims to improve memory usage while maintaining\nperformance and speed. Mario, who is a Mozilla representative, dedicated\nhimself to learning Rust in late 2015. He started this journey by reading\nthe Rust book, [solving programming\nexercises](https://exercism.io/tracks/rust), migrating Python code to Rust,\nand then rewriting one of his [personal projects, a gallery for reveal.js\npresentations, in Rust](https://gitlab.com/mattdark/reveal-js-gallery).\n\n\nReveal-js is a framework for creating presentations using HTML, and allows\nthe user to store speaker notes, images, and more in a presentation gallery.\nMario first wrote his gallery app in Python but migrated the project into\nRust while he was learning the new language and found the process to be\nrelatively painless. But it wasn’t long before Mario hit a bump in the road\nwhen it came to using Rust for other projects.\n\n\n### Problems with Rust\n\n\n“I was working on another project that I applied to the Mozilla Open Leaders\nprogram two years ago,” said Mario. “And for this project I was using [Cairo\nSVG Python library](https://cairosvg.org/). 
I needed this specific library\nbecause I was converting SVG files to PDF. So that's how I found out that it\nwas _impossible_ to rewrite this specific part with Rust because there is no\nalternative available in Rust for this library.”\n\n\nNot only did Rust lack an alternative to the CairoSVG Python library, but\nthere was also no crates (Rust libraries) for Firebase. Mario needed\nFirebase for his project that takes the database of speaker information and\nautomatically generates certificates of participation.\n\n\nMario was presenting an example of a web app at Google I/O Extended on how\nto use Rust and Firebase with web apps. But there was no functional library\nin Rust that could connect with Firebase and retrieve data from the\ndatabase.\n\n\nMario came up with a solution: use Python.\n\n\n_More of a video person? Watch Mario’s entire presentation from GitLab\nCommit London in the video below, or follow along step by step in this blog\npost._\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/BYfJBa_79Xo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## Using Python and Rust together\n\n\nIn his presentation at GitLab Commit London, Mario demonstrated how he\nmanaged to build a Firebase web application in Rust using Python, and deploy\nit using GitLab CI so fellow GitLab users can try to replicate his process,\nor get some input if they're also having difficulties.\n\n\n### Configure your environment\n\n\nThe first step is to make sure that your environment is properly configured.\nTo use both Python, Rust, and GitLab CI, you’ll need the following on your\nmachine:\n\n\n*   Git\n\n*   [GCC](https://crates.io/crates/gcc)\n    *   Rust needs a C compiler and Cargo, which is the package manager for Rust projects\n*   Rust\n    *   Nightly mode for this project\n    *   Cargo\n*   Python 3.5+\n    *   
[pipenv](https://github.com/pypa/pipenv) for managing dependencies\n\nInstall Rust using [Rustup](https://rustup.rs/) by typing the code below\ninto your terminal.\n\n\n`curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh`\n\n\nYou’ll also need to install bindings to run Python code directly from Rust,\nand that will also help with writing Python models using Rust code. Mario\nrecommends [CPython](https://crates.io/crates/cpython)and\n[Py03](https://crates.io/crates/pyo3), but used CPython in this demo.\n\n\n### Kick-start your project\n\n\nNext, Mario describes the general process for creating a project using\nPython and Rust.\n\n\nCargo is a package manager for Rust projects, and will create a Cargo.toml\nfile and src/ directory when its run. The Cargo.toml file is the manifest\nfor the application and includes the dependencies the project requires.\nWithin the src/ file is a [main.rs\nfile](https://gitlab.com/mattdark/firebase-example/blob/master/src/main.rs)\nthat contains an example of a Rust application.\n\n\nThe next step is to move through the src/ directory Cargo created to set up\nthe default toolchain for the project.\n\n\n```ruby\n\n[package]\n\nname = \"firebase_sample\"\n\nversion = \"0.1.0\"\n\nauthors = [\"mattdark\"]\n\nedition = \"2018\"\n\n[dependencies]\n\ncpython = \"0.3\"\n\nserde = \"1.0.99\"\n\nserde_derive = \"1.0.99\"\n\nserde_json = \"1.0.40\"\n\nrocket = \"0.4.2\"\n\n[dependencies.rocket_contrib]\n\nversion = \"0.4\"\n\nfeatures = [\"handlebars_templates\"]\n\n```\n\n\nThe Cargo.toml file will show the name of the application, the version,\nauthors etc. 
And if you’re working on Linux, it will take the user of your\nsystem and put it as the author of the project.\n\n{: .note}\n\n\n“The dependencies that we need for the project are CPython for the Python\npart, [Serde](https://serde.rs/), which is a library that help us with\nreading information for files like JSON, and Rocket, which is a web\nframework for Rust,” said Mario.\n\n\nNext, set the [Nightly version of\nRust](https://doc.rust-lang.org/1.2.0/book/nightly-rust.html) as the default\ntoolchain for the project.\n\n\nAdd a ‘python’ directory to src/ directory, where you’ll be adding the\nPython models required for this project to this directory.\n\n\nOnce the src/python is set-up, add the Pipfile or [requirements.txt\nfile](https://gitlab.com/mattdark/firebase-example/blob/master/requirements.txt)\nfor the dependencies of the Python module to the directory.\n\n\n```ruby\n\n[[source]]\n\nname = \"pypi\"\n\nurl = \"https://pypi.org/simple\"\n\nverify_ssl = true\n\n[dev-packages]\n\n[packages]\n\nfirebase = \"*\"\n\npython-jwt = \"*\"\n\ngcloud = \"*\"\n\nsseclient = \"*\"\n\npycrypto = \"*\"\n\nrequests-toolbelt = \"*\"\n\n[requires]\n\npython_version = \"3.7.3\"\n\n```\n\n\nThe Pipfile is an example of a project used for Firebase. 
Included here is\nall the dependencies we need for Firebase in the file, as well as the Python\nversion in use.\n\n{: .note}\n\n\nNext write the Rust code in src/main.rs and add the Python scripts in\nsrc/python.\n\n\n### Writing the Python code\n\n\nMario’s Firebase application is designed to rake a database of speaker\ninformation and automatically generate certificates of participation in PDF\nformat.\n\n\n```\n\n{\n  \"slides\" : {\n    \"privacymatters\" : {\n      \"description\" : \"Talk about privacy & security\",\n      \"file\" : \"privacy-matters.md\",\n      \"id\" : \"2\",\n      \"screenshot\" : \"/img/screenshot/privacy-matters.png\",\n      \"theme\" : \"mozilla.css\",\n      \"title\" : \"Why Privacy Matters?\",\n      \"url\" : \"privacy-matters\"\n    },\n    \"rust101\" : {\n      \"description\" : \"Introduction to Rust\",\n      \"file\" : \"rust-101.md\",\n      \"id\" : \"1\",\n      \"screenshot\" : \"/img/screenshot/rust-101.png\",\n      \"theme\" : \"mozilla.css\",\n      \"title\" : \"Rust 101\",\n      \"url\" : \"rust-101\"\n    },\n    \"rustrocket\" : {\n      \"description\" : \"Building Web Apps with Rust + Rocket\",\n      \"file\" : \"rust-rocket.md\",\n      \"id\" : \"3\",\n      \"screenshot\" : \"/img/screenshot/rust-rocket.png\",\n      \"theme\" : \"mozilla.css\",\n      \"title\" : \"Rust + Rocket\",\n      \"url\" : \"rust-rocket\"\n    },\n    \"whyrust\" : {\n      \"description\" : \"What is Rust and Why Learn it?\",\n      \"file\" : \"why-rust.md\",\n      \"id\" : \"4\",\n      \"screenshot\" : \"/img/screenshot/why-rust.png\",\n      \"theme\" : \"mozilla.css\",\n      \"title\" : \"Why Rust?\",\n      \"url\" : \"why-rust\"\n    }\n  }\n}\n\n```\n\n{: .language-ruby}\n\n\nInformation about Mario’s Firebase application lives in this JSON file of\nthe Firebase database.\n\n{: .note}\n\n\nThe application is written in Rust, and therefore needed a Firebase\nconnector. 
But since the is not a functional Firebase crate, Mario had to\nthink outside the box and use the Python library.\n\n\n```\n\nimport json\n\nfrom firebase import Firebase\n\ndef read_data(self):\n    config = {\n        \"apiKey\": \"APIKEY\",\n        \"authDomain\": \"fir-speakers.firebaseapp.com\",\n        \"databaseURL\": \"https://fir-speakers.firebaseio.com\",\n        \"projectId\": \"fir-speakers\",\n        \"storageBucket\": \"\",\n        \"messagingSenderId\": \"MESSAGINGSENDERID\"\n    }\n    firebase = Firebase(config)\n    speaker = list()\n    db = firebase.database()\n    all_speakers = db.child(\"speakers\").get()\n    for x in all_speakers.each():\n        speaker.append(x.val())\n    s = json.dumps(speaker)\n    return s\n```\n\n{: .language-ruby}\n\n\n“For the Python part of the project, we have to connect to the Firebase\ndatabase, retrieve the data and save it to a variable that later we will\nconvert to JSON so that Rust can correctly rake the data and pass it to the\nHTML5,” said Mario.\n\n\n### Troubleshooting\n\n\nThere was a profound lack of documentation about how to use Rust and Python\ntogether to build a Firebase application, and Mario ran into even more\nhurdles as he tried to troubleshoot.\n\n\nThe two major problems that he was trying to solve were:\n\n\n*   Calling a Python script (.py) from Rust\n\n*   Passing a value from Rust to Python\n\n\n“In the Github repositories for these projects – well at least for the\nlibrary that I'm using – there is no information about how you can do those\ntasks,” said Mario.\n\n\nAfter hours of researching and testing, he discovered a solution.\n\n\n### Building the Project\n\n\nMario was able to run the Python script from Rust and execute the function\nthat connects to the Firebase database. 
Once connected to the Firebase\ndatabase, the process will retrieve the data and funnel it back to Rust as\nJSON.\n\n\n![Rust\ncode](https://about.gitlab.com/images/blogimages/python_and_rust_post/rust-code.jpg){:\n.shadow.medium.center}\n\n\nAfter some troubleshooting, Mario discovered the proper code to run in Rust\nto bridge the gap between Rust and the Firebase application.\n\n{: .note-text.center}\n\n\nNext, the Rust code will convert the values into a HashMap, and pass that\ninformation to an HTML file.\n\n\nNow that the project is built, it’s time to run it using:\n\n\n```\n\ncargo run\n\npipenv run cargo run\n\n```\n\nTo see your project type `localhost:8000` into the web browser.\n\nThe result should look similar to what you see here and in the [GitLab\nproject](https://gitlab.com/mattdark/reveal-js-gallery).\n\n\n![GitLab project\npreview](https://about.gitlab.com/images/blogimages/python_and_rust_post/gitlabproject.jpg){:\n.shadow.medium.center}\n\n\n## Deploying the application with GitLab CI\n\n\n### Dockerize the application\n\n\nTo configure for GitLab CI, Mario had to choose a Docker image for running\nthe test and deployment. There is a custom Docker image for Rust that can be\ncustomized to fit the specific version for Rust, which in this case is Rust\nNightly.\n\n\n`rustlang/rust:nightly`\n\n\n“The problem is that the Python version that is installed in these Docker\nimage is based on Debian image itself, so we need pipenv and we need other\ntools to be installed,” said Mario.\n\n\nSo Mario customized the Docker image and generated a second one that has the\npipenv components.\n\n\n### Create the repository\n\n\nNow that the Docker images are configured for the application, it’s time to\ncreate the repository and upload the code using the Terminal or GitKraken.\n\n\nThe next – and arguably the most important – step in the process is\n**documentation**. 
Mario urges all users to upload any and all relevant\nfiles to the repository, such as the README, LICENSE, CODE_OF_CONDUCT.md,\netc.\n\n\nOnce the necessary files are uploaded into the repository, it’s time to\nstart configuring for GitLab CI.\n\n\nMario recommends using Gitignore.io to the .gitignore file for the\ntechnologies being used for the project (in this case, Rust or Python).\nThere are three key files that need to be written to configure the pipelines\nrequired for running GitLab CI:\n\n\n*   **Procfile**: A way to tell a platform like Heroku what is the binary\nfile for the project. Since the project is being developed with Rust, it\nwill generate a binary file that needs to be executed.\n\n*   **RustConfig**: Contains the version of Rust we are using for the\nproject.\n\n*   **Rocket.toml**: Can be used to specify the configuration parameters for\nthe environment.\n\n\nYou can find examples of these files in the [Firebase example project on\nGitLab](https://gitlab.com/mattdark/firebase-example/tree/prod).\n\n\n### GitLab CI\n\n\nAll of these efforts go into preparing the application for deployment using\nGitLab CI. Deployment with GitLab CI is simple, because each stage of the\ndeployment process lives in a yaml file. [Mario’s gitlab-ci.yml\nfile](https://gitlab.com/mattdark/firebase-example/blob/master/.gitlab-ci.yml)\nonly includes the build and production stages, but [more comprehensive\ninformation about GitLab CI is available\nhere](https://docs.gitlab.com/ee/ci/).\n\n\n## Document, document, document\n\n\nThe lack of documentation created significant delays for Mario as he tried\nto get his Firebase application off the ground. While in this case the\ninformation he required was difficult to track down even in English, there\nare even more substantial barriers for non-native English speakers or\nnon-English speaking programmers.\n\n\n>>“I'm from Mexico, so I'm living in a Spanish-speaking country and I\nstarted learning English 15 years ago. 
That means that I'm in a privileged\nposition. When we are writing the documentation sometimes, we forget that\nnot many people have the opportunity to learn English,” said Mario. “I'm\ntalking about English because most of the information and documentation of\ntechnologies that are available in this language. So if we live in a\nnon-English speaking country, don't forget to write the documentation in our\nnative language.”\n\n\nHis comments resonated strongly with the GitLab Commit London audience.\n\n\n{::options parse_block_html=\"false\" /}\n\n\n\u003Cdiv class=\"center\">\n\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">This is so\nimportant for accessibility.\u003Cbr>\u003Cbr>Same goes for filling documentation full\nof jargon and marketing terms.\u003Cbr>\u003Cbr>Documentation is there to inform those\nwho don&#39;t have the knowledge, presuming knowledge just furthers a toxic\nculture of gatekeeping. \u003Ca\nhref=\"https://t.co/k7EILtHuvy\">pic.twitter.com/k7EILtHuvy\u003C/a>\u003C/p>&mdash;\nMatt Smith (@Harmelodic) \u003Ca\nhref=\"https://twitter.com/Harmelodic/status/1181946002720411648?ref_src=twsrc%5Etfw\">October\n9, 2019\u003C/a>\u003C/blockquote> \u003Cscript async\nsrc=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\n\u003C/div>\n\n\nJoin us at GitLab Commit San Francisco to hear about the innovative ways\nusers like Mario are using GitLab and other open source technologies!\nRegistration information is available below.\n\n\nCover image by [Jack Carter](https://unsplash.com/@carterjack) on\n[Unsplash](https://unsplash.com/s/photos/lightbulb).\n\n{: .note}\n",[109,269,9],{"slug":1576,"featured":6,"template":700},"python-rust-and-gitlab-ci","content:en-us:blog:python-rust-and-gitlab-ci.yml","Python Rust And Gitlab 
Ci","en-us/blog/python-rust-and-gitlab-ci.yml","en-us/blog/python-rust-and-gitlab-ci",{"_path":1582,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1583,"content":1589,"config":1594,"_id":1596,"_type":14,"title":1597,"_source":16,"_file":1598,"_stem":1599,"_extension":19},"/en-us/blog/redbox-on-demand-delivers-with-gitlab",{"title":1584,"description":1585,"ogTitle":1584,"ogDescription":1585,"noIndex":6,"ogImage":1586,"ogUrl":1587,"ogSiteName":686,"ogType":687,"canonicalUrls":1587,"schema":1588},"Redbox delivers On Demand with GitLab","Redbox's Joel Vasallo and Nicholas Konieczko explain how they ‘deliver software like a fox’ with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673064/Blog/Hero%20Images/redbox-blog-jannes-glas-unsplash.jpg","https://about.gitlab.com/blog/redbox-on-demand-delivers-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Redbox delivers On Demand with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein Matturro\"}],\n        \"datePublished\": \"2019-10-01\",\n      }",{"title":1584,"description":1585,"authors":1590,"heroImage":1586,"date":1591,"body":1592,"category":694,"tags":1593},[894],"2019-10-01","\nAt GitLab Connect Chicago, Redbox's [Joel Vasallo](https://www.linkedin.com/in/joelvasallo) and [Nicholas Konieczko](https://www.linkedin.com/in/nick-konieczko-42895354) presented a talk called “Delivering software like a fox.” Redbox, primarily known for providing movie and video game rentals via automated retail kiosks, has recently expanded to provide streaming services.\n\nRedbox On Demand is the company's newest streaming platform, built on .NET Core in containers on Linux in the cloud. The video retail company had a few goals in mind when building its latest platform. 
Joel, cloud DevOps manager, and Nicholas, mobile applications manager, share their three main objectives and how GitLab provides the tool that ensures success.\n\n## Goal #1: Modernize software development processes\n\nThe mobile and development teams wanted to be able to create the platform using the latest technology in order to provide the best product for the customer. “[There was] nothing wrong with the way they were done, but in the sense that the world has really come a long way from traditional Windows servers... in a data center running .NET frameworks and stuff like that, we really wanted to empower developers to use containers,” Joel says.\n\n**Outcome**: The mobile and development teams currently use GitLab CI, leveraging Fastlane. The power of GitLab and its ability to work along with other tools helped to modernize software development processes.\n\n## Goal #2: Speed up delivery to the cloud\n\nProviding an on-demand service means that the application has to actually be ready at the very moment of demand. Being new to the streaming arena, it was important for Redbox to move to the cloud. “We also wanted to leverage the power of the cloud and have the scaling perspective of the cloud. We wanted to be in the cloud, as everyone wants to be nowadays. We also wanted to ensure that our features go out the door faster because, in the streaming business, it's all about being first to market with your features,” Joel says.\n\n**Outcome**: The teams now use GitLab CI along with Spinnaker. “We wanted to increase software delivery and do what's best for the teams. I don't want to dictate what developers should do in their day-to-day workflow,” Joel says.\n\n## Goal #3: Empower developers to own their applications\n\nThe hope was that a developer would be able to deploy code to production at any time with a single click of a button. Developers would then have the ability to just write the code and a working tool will be able to pick up the errors. 
“Code goes out the door based on an approval process. Developers are able to own and operate their code, essentially,” Joel says.\n\n**Outcome**: The objective was achieved, according to Joel. “Ultimately, developers own their own apps. GitLab Enterprise allowed teams to own their own verticals as well as Spinnaker, which allows them to deploy it to whatever cloud provider that they so choose.”\n\nTo learn more about how GitLab helped the mobile and development teams achieve their platform goals (and more), watch the presentation below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/3eG8Muorafo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Key takeaways\n\n### Putting the version in version control\n\n“There was a disparate amount of Git and source control tools. Namely, we had an internal Git server, which... I don't know what it was actually running. We had GitHub.com. We had Team Foundation Server. We had Azure DevOps. So all this stuff... Teams were really all over the place. They all had their source code. Getting access to things was just a nightmare.\n\n“So what did we do? Let's get another version control system into the mix. We need a fifth one. So we picked GitLab. Honestly, GitLab was the most tried and true solution from our perspective. It has support for a few things, like on-prem, also in the cloud as well on the .com offering. But, more than that, at the end of the day it let developers control their namespace within a large organization.” – _Joel Vasallo_\n\n### GitLab works for mobile development\n\n“The mobile teams were the first to get to try out GitLab.com. It's simple. It's extremely easy to get started. There's a lot of documentation out there, all the things I love. It's very cost effective. 
We were able to get a free trial running, get repos open, test out different things, different features, to see if it could work for our teams.\" – _Nick Konieczko_\n\n### Yes, you can use Jenkins too\n\n“This is, honestly, one of the best things about GitLab, is they just want us to be successful. Batteries are included. There's a lot of great tools in there, such as GitLab CI, the GitLab Issue Board... and GitLab's Artifact Repository. It's built into the platform. GitLab's pipelines with the CI/CD process, all of this comes in. But if you don't want to use it, it'll adapt to your business model.\n\n“For example, my team uses Jenkins. We can still use GitLab. There's no blocking event where it says, ‘Oh, you're using Jenkins. You can't talk to us. Error. Blocked.’ No, we use Jira. We type ‘Jira, take us into GitLab’ all the time. We have JFrog Artifactory. We also use Spinnaker for our software delivery. Again, it transforms to what you need as a business, and that's the thing that I really appreciate, being on the DevOps side.” – _Joel Vasallo_\n\nCover image by [Jannes Glas](https://unsplash.com/@jannesglas) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,1534,743,233],{"slug":1595,"featured":6,"template":700},"redbox-on-demand-delivers-with-gitlab","content:en-us:blog:redbox-on-demand-delivers-with-gitlab.yml","Redbox On Demand Delivers With Gitlab","en-us/blog/redbox-on-demand-delivers-with-gitlab.yml","en-us/blog/redbox-on-demand-delivers-with-gitlab",{"_path":1601,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1602,"content":1608,"config":1615,"_id":1617,"_type":14,"title":1618,"_source":16,"_file":1619,"_stem":1620,"_extension":19},"/en-us/blog/sentry-integration-blog-post",{"title":1603,"description":1604,"ogTitle":1603,"ogDescription":1604,"noIndex":6,"ogImage":1605,"ogUrl":1606,"ogSiteName":686,"ogType":687,"canonicalUrls":1606,"schema":1607},"Sentry's GitLab integration streamlines error remediation","Your code has bugs, my code has 
bugs, everyone’s code has bugs (probably). Let’s fix that.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679964/Blog/Hero%20Images/sentry-io-blog.jpg","https://about.gitlab.com/blog/sentry-integration-blog-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Streamline and shorten error remediation with Sentry’s new GitLab integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eva Sasson\"}],\n        \"datePublished\": \"2019-01-25\",\n      }",{"title":1609,"description":1604,"authors":1610,"heroImage":1605,"date":1612,"body":1613,"category":694,"tags":1614},"Streamline and shorten error remediation with Sentry’s new GitLab integration",[1611],"Eva Sasson","2019-01-25","\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/KUHk1uuXWhA?rel=0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nSentry is open source error tracking that gives visibility across your entire stack and provides the details you need to fix bugs, ASAP. 
Because the only thing better than visibility and details is more visibility and details, Sentry improved their [GitLab integration](https://docs.sentry.io/workflow/integrations/global-integrations/gitlab/?utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM) by adding [release](https://docs.sentry.io/workflow/releases/?platform=browser&utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM) and [commit](https://docs.sentry.io/workflow/releases/?platform=browser&utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM#link-repository) tracking as well as [suspect commits](https://docs.sentry.io/workflow/releases/?platform=browser&utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM#after-linking-a-repository).\n\n### Streamline your workflow with issue management and creation\n\nWhen you receive an alert about an error, the last thing you want to do is to jump around 20 different tools trying to find out exactly what happened and where. Developers with both Sentry and GitLab in their application lifecycle benefit from issue management and issue creation to their GitLab accounts directly in the Sentry UI, alleviating some of the hassle of back-and-forth tool toggling.\n\n![GitLab account in Sentry](https://about.gitlab.com/images/blogimages/sentry/gitlab-sentry-integration.png){: .shadow.large.center}\n\nOf course, less tool jumping results in a more streamlined triaging process and shortened time to issue resolution – something that benefits the whole team.\n\n![Creating GitLab issue](https://about.gitlab.com/images/blogimages/sentry/create-gitlab-issue.png){: .shadow.medium.center}\n\nHave a GitLab issue that wasn’t created in Sentry? No problem. 
Existing issues are also easily linked.\n\n![Import GitLab issue](https://about.gitlab.com/images/blogimages/sentry/import-gitlab-issue.png){: .shadow.medium.center}\n\n### Find and fix bugs faster with release and commit tracking\n\nWhy stop at streamlining the triaging process, when we can also make issue resolution more efficient? Sentry’s GitLab integration now utilizes GitLab commits to find and fix bugs faster.\n\nWith the newly added release and commit tracking, an enhanced release overview page uncovers new and resolved issues, files changed, and authors. Developers can also resolve issues via commit messages or merge requests, see suggested assignees for issues, and receive detailed deploy emails.\n\nWant a big flashing arrow that points to an error’s root cause? Sentry’s suspect commits feature exposes the commit that likely introduced an error as well as the developer who wrote the broken code.\n\n![Suspect commits feature](https://about.gitlab.com/images/blogimages/sentry/suspect-commits-feature.png){: .shadow.medium.center}\n\nKeep in mind that this feature is available for Sentry users on “Teams” plans and above.\n{: .note}\n\nCheck out [Sentry’s GitLab integration documentation](https://docs.sentry.io/workflow/integrations/global-integrations/gitlab/?utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM) to get started.\n\n### What’s next?\n\nAgain, why stop there, when we can do even more? GitLab is currently working to bring Sentry into the GitLab interface. Soon, GitLab and Sentry users will see their Sentry errors listed in their GitLab projects. 
Read the documentation on [the integration here](https://docs.gitlab.com/ee/operations/error_tracking.html).\n\n### About the guest author\n\nEva Sasson is a Product Marketer at [Sentry.io](https://sentry.io/welcome/), an open source error-tracking tool that gives developers the contextual information they need to resolve issues quickly, and integrates with the other development tools across the stack.\n",[109,807,830,233,697,720,721,9,918],{"slug":1616,"featured":6,"template":700},"sentry-integration-blog-post","content:en-us:blog:sentry-integration-blog-post.yml","Sentry Integration Blog Post","en-us/blog/sentry-integration-blog-post.yml","en-us/blog/sentry-integration-blog-post",{"_path":1622,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1623,"content":1629,"config":1635,"_id":1637,"_type":14,"title":1638,"_source":16,"_file":1639,"_stem":1640,"_extension":19},"/en-us/blog/setting-up-gitlab-ci-for-android-projects",{"title":1624,"description":1625,"ogTitle":1624,"ogDescription":1625,"noIndex":6,"ogImage":1626,"ogUrl":1627,"ogSiteName":686,"ogType":687,"canonicalUrls":1627,"schema":1628},"Setting up GitLab CI for Android projects","Learn how to set up GitLab CI to ensure your Android app compiles and passes tests.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666699/Blog/Hero%20Images/banner.jpg","https://about.gitlab.com/blog/setting-up-gitlab-ci-for-android-projects","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Setting up GitLab CI for Android projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2018-10-24\",\n      }",{"title":1624,"description":1625,"authors":1630,"heroImage":1626,"date":1632,"body":1633,"category":741,"tags":1634},[1631],"Jason Yavorska","2018-10-24","Note: This is a new version of a previously published blog post, updated for\nthe current Android API level (28). 
Thanks Grayson Parrelli for authoring\n[the original post](/blog/setting-up-gitlab-ci-for-android-projects/)!\n\n{: .alert .alert-info}\n\n\nHave you ever accidentally checked on a typo that broke your Android build\nor unknowingly broke an important use case with a new change? Continuous\nintegration is a way for developers to avoid these headaches, allowing you\nto confirm that changes to your app compile, and your tests pass before\nthey're merged in.\n\n\n[GitLab CI/CD](/solutions/continuous-integration/) is a wonderful [continuous\nintegration](/blog/continuous-integration-delivery-and-deployment-with-gitlab/)\nbuilt-in solution, and in this post we'll walk through how to set up a basic\nconfig file (`.gitlab-ci.yml`) to ensure your Android app compiles and\npasses unit and functional tests. We assume that you know the process of\ncreating an Android app, can write and run tests locally, and are familiar\nwith the basics of the GitLab UI.\n\n\n## Our sample project\n\n\nWe'll be working with a real-world open source Android project called\n[Materialistic](https://github.com/hidroh/materialistic) to demonstrate how\neasy it is to get up and running with GitLab CI for Android. Materialistic\ncurrently uses Travis CI with GitHub, but switching over is a breeze. If you\nhaven't seen Materialistic before, it's a fantastic open source Android\nreader for [Hacker News](https://news.ycombinator.com).\n\n\n### Testing\n\n\n[Unit\ntests](https://developer.android.com/training/testing/unit-testing/index.html)\nare the fundamental tests in your app testing strategy, from which you can\nverify that the logic of individual units is correct. They are a fantastic\nway to catch regressions when making changes to your app. They run directly\non the Java Virtual Machine (JVM), so you don't need an actual Android\ndevice to run them.\n\n\nIf you already have working unit tests, you shouldn't have to make any\nadjustments to have them work with GitLab CI. 
Materialistic uses\n[Robolectric](http://robolectric.org/) for tests,\n[Jacoco](https://www.eclemma.org/jacoco/) for coverage, and also has a\nlinting pass. We'll get all of these easily running in our `.gitlab-ci.yml`\nexample except for Jacoco, since that requires a secret token we do not have\n- though I will show you how to configure that in your own projects.\n\n\n## Setting up GitLab CI\n\n\nWe want to be able to configure our project so that our app is built, and it\nhas the complete suite of tests run upon check-in. To do so, we have to\ncreate our GitLab CI configuration file, called `.gitlab-ci.yml`, and place\nit in the root of our project.\n\n\nSo, first things first: If you're just here for a snippet to copy-paste,\nhere is a `.gitlab-ci.yml` that will build and test the Materialistic app:\n\n\n```yml\n\nimage: openjdk:8-jdk\n\n\nvariables:\n  ANDROID_COMPILE_SDK: \"28\"\n  ANDROID_BUILD_TOOLS: \"28.0.2\"\n  ANDROID_SDK_TOOLS:   \"4333796\"\n\nbefore_script:\n  - apt-get --quiet update --yes\n  - apt-get --quiet install --yes wget tar unzip lib32stdc++6 lib32z1\n  - wget --quiet --output-document=android-sdk.zip https://dl.google.com/android/repository/sdk-tools-linux-${ANDROID_SDK_TOOLS}.zip\n  - unzip -d android-sdk-linux android-sdk.zip\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"platforms;android-${ANDROID_COMPILE_SDK}\" >/dev/null\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"platform-tools\" >/dev/null\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"build-tools;${ANDROID_BUILD_TOOLS}\" >/dev/null\n  - export ANDROID_HOME=$PWD/android-sdk-linux\n  - export PATH=$PATH:$PWD/android-sdk-linux/platform-tools/\n  - chmod +x ./gradlew\n  # temporarily disable checking for EPIPE error and use yes to accept all licenses\n  - set +o pipefail\n  - yes | android-sdk-linux/tools/bin/sdkmanager --licenses\n  - set -o pipefail\n\nstages:\n  - build\n  - test\n\nlintDebug:\n  stage: build\n  script:\n    - ./gradlew -Pci 
--console=plain :app:lintDebug -PbuildDir=lint\n\nassembleDebug:\n  stage: build\n  script:\n    - ./gradlew assembleDebug\n  artifacts:\n    paths:\n    - app/build/outputs/\n\ndebugTests:\n  stage: test\n  script:\n    - ./gradlew -Pci --console=plain :app:testDebug\n```\n\n\nWell, that's a lot of code! Let's break it down.\n\n\n### Understanding `.gitlab-ci.yml`\n\n\n#### Defining the Docker Image\n\n{:.special-h4}\n\n\n```yml\n\nimage: openjdk:8-jdk\n\n```\n\n\nThis tells [GitLab Runners](https://docs.gitlab.com/ee/ci/runners/) (the\nthings that are executing our build) what [Docker\nimage](https://hub.docker.com/explore/) to use. If you're not familiar with\n[Docker](https://hub.docker.com/), the TL;DR is that Docker provides a way\nto create a completely isolated version of a virtual operating system\nrunning in its own\n[container](https://www.sdxcentral.com/cloud/containers/definitions/what-is-docker-container-open-source-project/).\nAnything running inside the container thinks it has the whole machine to\nitself, but in reality there can be many containers running on a single\nmachine. Unlike full virtual machines, Docker containers are super fast to\ncreate and destroy, making them great choices for setting up temporary\nenvironments for building and testing.\n\n\nThis [Docker image (`openjdk:8-jdk`)](https://hub.docker.com/_/openjdk/)\nworks perfectly for our use case, as it is just a barebones installation of\nDebian with Java pre-installed. We then run additional commands further down\nin our config to make our image capable of building Android apps.\n\n\n#### Defining variables\n\n\n```yml\n\nvariables:\n  ANDROID_COMPILE_SDK: \"28\"\n  ANDROID_BUILD_TOOLS: \"28.0.2\"\n  ANDROID_SDK_TOOLS:   \"4333796\"\n```\n\n\nThese are variables we'll use throughout our script. They're named to match\nthe properties you would typically specify in your app's `build.gradle`.\n\n\n- `ANDROID_COMPILE_SDK` is the version of Android you're compiling with. 
It\nshould match `compileSdkVersion`.\n\n- `ANDROID_BUILD_TOOLS` is the version of the Android build tools you are\nusing. It should match `buildToolsVersion`.\n\n- `ANDROID_SDK_TOOLS` is a little funny. It's what version of the command\nline tools we're going to download from the [official\nsite](https://developer.android.com/studio/index.html). So, that number\nreally just comes from the latest version available there.\n\n\n#### Installing packages\n\n{:.special-h4}\n\n\n```yml\n\nbefore_script:\n  - apt-get --quiet update --yes\n  - apt-get --quiet install --yes wget tar unzip lib32stdc++6 lib32z1\n```\n\n\nThis starts the block of the commands that will be run before each job in\nour config.\n\n\nThese commands ensure that our package repository listings are up to date,\nand it installs packages we'll be using later on, namely: `wget`, `tar`,\n`unzip`, and some packages that are necessary to allow 64-bit machines to\nrun Android's 32-bit tools.\n\n\n#### Installing the Android SDK\n\n\n```yml\n  - wget --quiet --output-document=android-sdk.zip https://dl.google.com/android/repository/sdk-tools-linux-${ANDROID_SDK_TOOLS}.zip\n  - unzip -d android-sdk-linux android-sdk.zip\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"platforms;android-${ANDROID_COMPILE_SDK}\" >/dev/null\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"platform-tools\" >/dev/null\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"build-tools;${ANDROID_BUILD_TOOLS}\" >/dev/null\n```\n\n\nHere we're downloading the Android SDK tools from their official location,\nusing our `ANDROID_SDK_TOOLS` variable to specify the version. 
Afterwards,\nwe're unzipping the tools and running a series of `sdkmanager` commands to\ninstall the necessary Android SDK packages that will allow our app to build.\n\n\n#### Setting up the environment\n\n\n```yml\n  - export ANDROID_HOME=$PWD/android-sdk-linux\n  - export PATH=$PATH:$PWD/android-sdk-linux/platform-tools/\n  - chmod +x ./gradlew\n  # temporarily disable checking for EPIPE error and use yes to accept all licenses\n  - set +o pipefail\n  - yes | android-sdk-linux/tools/bin/sdkmanager --licenses\n  - set -o pipefail\n```\n\n\nFinally, we wrap up the `before_script` section of our config with a few\nremaining tasks. First, we set the `ANDROID_HOME` environment variable to\nthe SDK location, which is necessary for our app to build. Next, we add the\nplatform tools to our `PATH`, allowing us to use the `adb` command without\nspecifying its full path, which is important when we run a downloaded script\nlater. Next, we ensure that `gradlew` is executable, as sometimes Git will\nmess up permissions.\n\n\nThe next command `yes | android-sdk-linux/tools/bin/sdkmanager --licenses`\nis responsible for accepting the SDK licenses. Because the unix `yes`\ncommand results in an EPIPE error once the pipe is broken (when the\nsdkmanager quits normally), we temporarily wrap the command in `+o pipefile`\nso that it does not terminate script execution when it fails.\n\n\n#### Defining the stages\n\n\n```yml\n\nstages:\n  - build\n  - test\n```\n\n\nHere we're defining the different\n[stages](https://docs.gitlab.com/ee/ci/yaml/#stages) of our build. We can\ncall these anything we want. A stage can be thought of as a group of\n[jobs](https://docs.gitlab.com/ee/ci/jobs/). All of the jobs in the same\nstage happen in parallel, and all jobs in one stage must be completed before\nthe jobs in the subsequent stage begin. We've defined two stages: `build`\nand `test`. 
They do exactly what you think: the `build` stage ensures the\napp compiles, and the `test` stage runs our unit and functional tests.\n\n\n#### Building the app\n\n\n```yml\n\nlintDebug:\n  stage: build\n  script:\n    - ./gradlew -Pci --console=plain :app:lintDebug -PbuildDir=lint\n\nassembleDebug:\n  stage: build\n  script:\n    - ./gradlew assembleDebug\n  artifacts:\n    paths:\n    - app/build/outputs/\n```\n\n\nThis defines our first job, called `build`. It has two parts - a linter to\nensure that the submitted code is up to snuff, and the actual compilation of\nthe code (and configuration of the `artifacts` that GitLab should expect to\nfind). These are run in parallel for maximum efficiency.\n\n\n#### Running tests\n\n\n```yml\n\ndebugTests:\n  stage: test\n  script:\n    - ./gradlew -Pci --console=plain :app:testDebug\n```\n\n\nThis defines a job called `debugTests` that runs during the `test` stage.\nNothing too crazy here about setting something simple like this up!\n\n\nIf we had wanted to get Jacoco also working, that would be very\nstraightforward. 
Simply adding a section as follows would work - the only\nadditional thing you'd need to do is add a secret variable containing your\npersonal `COVERALLS_REPO_TOKEN`:\n\n\n```yml\n\ncoverageTests:\n  stage: test\n  script:\n    - ./gradlew -Pci --console=plain jacocoTestReport coveralls\n```\n\n\n## Run your new CI setup\n\n\nAfter you've added your new `.gitlab-ci.yml` file to the root of your\ndirectory, just push your changes to the appropriate branch and off you go!\nYou can see your running builds in the **Pipelines** tab of your project.\nYou can even watch your build execute live and see the runner's output,\nallowing you to debug problems easily.\n\n\n![Pipelines tab\nscreenshot](https://about.gitlab.com/images/blogimages/gitlab-ci-for-android-2018/tutorial-01.png){:.shadow}\n\n\nAfter your build is done, you can retrieve your build artifacts:\n\n\n- First, click on your completed build, then navigate to the Jobs tab:\n\n\n![Build details button\nscreenshot](https://about.gitlab.com/images/blogimages/gitlab-ci-for-android-2018/tutorial-02.png){:.shadow}\n\n\nFrom here, simply click on the download button to download your build\nartifacts.\n\n\n## Conclusion\n\n\nSo, there you have it! 
You now know how to create a GitLab CI config that\nwill ensure your app:\n\n\n- Compiles\n\n- Passes tests\n\n- Allows you to access your build artifacts (like your\n[APK](https://en.wikipedia.org/wiki/Android_application_package))\nafterwards.\n\n\nYou can take a look at my local copy of the Materialistic repository, with\neverything up and running, at [this\nlink](https://gitlab.com/jyavorska/androidblog-2018)\n\n\nEnjoy your newfound app stability :)\n\n\n\u003C!-- closes https://gitlab.com/gitlab-com/www-gitlab-com/issues/3167 -->\n\n\u003C!-- cover image: https://unsplash.com/photos/aso6SYJZGps -->\n\n\n\u003Cstyle>\n  img {\n    display: block;\n    margin: 0 auto 20px auto;\n  }\n  .special-h4 {\n    margin-top: 20px !important;\n  }\n\u003C/style>\n",[109,9],{"slug":1636,"featured":6,"template":700},"setting-up-gitlab-ci-for-android-projects","content:en-us:blog:setting-up-gitlab-ci-for-android-projects.yml","Setting Up Gitlab Ci For Android Projects","en-us/blog/setting-up-gitlab-ci-for-android-projects.yml","en-us/blog/setting-up-gitlab-ci-for-android-projects",{"_path":1642,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1643,"content":1649,"config":1655,"_id":1657,"_type":14,"title":1658,"_source":16,"_file":1659,"_stem":1660,"_extension":19},"/en-us/blog/start-using-git",{"title":1644,"description":1645,"ogTitle":1644,"ogDescription":1645,"noIndex":6,"ogImage":1646,"ogUrl":1647,"ogSiteName":686,"ogType":687,"canonicalUrls":1647,"schema":1648},"How to tidy up your merge requests with Git","Here's how to use a Git feature that saves a lot of time and cleans up your MRs.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672243/Blog/Hero%20Images/git-tricks-cover-image.png","https://about.gitlab.com/blog/start-using-git","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to tidy up your merge requests with Git\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Ronald van Zon\"}],\n        \"datePublished\": \"2019-02-07\",\n      }",{"title":1644,"description":1645,"authors":1650,"heroImage":1646,"date":1652,"body":1653,"category":741,"tags":1654},[1651],"Ronald van Zon","2019-02-07","\n\nI've worked on a lot of open source projects and one thing they all have in common is\nwhen you create a merge request (or pull request) they will often ask, \"Can you clean up your request?\"\nbecause commits like *fix typo* should not be included in a Git history.\n\nNow there are a few ways of cleaning up commits and I'll show you what I have found to be the easiest way.\n\nBelow is an example scenario where I use a feature of Git that has saved me a lot of time.\nI have a tiny project seen in the image below.\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_demo_project.png){: .shadow.medium.center}\n\nNow I like to run my `main.py` in a test environment to see if it works as expected.\nI like to do that by configuring a `.gitlab-ci.yml` to run `main.py`.\nAlthough this is extremely easy, for this example I made sure I increased the number of commits\nto illustrate my example a bit more clearly. 
So after some time my commit history looks like this:\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_commits_bad.png){: .shadow.medium.center}\n\nHere you can see my first three commits add `README.md`, `main.py` and `.gitlab-ci.yml`.\nA few commits update my `gitlab-ci` file, trying some stuff out, and fixing typos.\nThere's also a commit that cleans up my `gitlab-ci` and two more to fix and clean up `main.py`.\n\nNow some of you might see this and think, \"Looks good,\" while others might want to scream at me\nfor making a mess out of my commits.\n\nHow do we fix it?\n\n## How to consolidate your commits\n\nFirst, let's revert the last two commits using [reset](https://docs.gitlab.com/ee/gitlab-basics/start-using-git.html#unstage-all-changes-that-have-been-added-to-the-staging-area).\nWe don't want to lose our changes so we will use `git reset --soft HEAD~2`.\n`--soft` will keep our changes of the files and `HEAD~2` tells Git the two commits from `HEAD` position should be reverted.\n\nWe create a new commit, `git commit --fixup 6c29979`. This will create a commit called `fixup! Add main Python file`.\nWhen we run `git rebase -i --autosquash 24d214a` we can see below that our `fixup` commit has been moved below\nthe commit we referenced with the tag *6c29979*. I could save this and the fixup will be merged into the commit above.\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_rebase_1.png){: .shadow.medium.center}\n\nBut if we look at the commits below the *fixup*, we see that all the commits are related to the *.gitlab-ci.yml*\nand by making a small change here, we can clean up my commits in a single go. 
We will change the *pick* to *fixup* for all\ncommits but `Add default gitlab-ci` (shown in the image below) and we will save this.\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_rebase_2.png){: .shadow.medium.center}\n\nChecking our Git log, we see that our long list of commits has been reduced to just three. There is a big change that\nyou should be aware of: because I have just rewritten my Git history I will have to use `git push --force` to update\nany *remote repository*.\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_commits_good.png){: .shadow.medium.center}\n\nThis looks a lot better now; only the relevant commits are left. But could we have prevented this while working on this\nfeature? The answer is yes.\n\nWe could have used `git commit --amend` to add almost every commit behind *19d8353 Add default gitlab-ci*.\nThis wouldn't require any new commit for any changes that we were making to our `.gitlab-ci.yml` file. We would have ended\nup with the following and we already know how to handle the *fixup*.\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_commits_alternative.png){: .shadow.medium.center}\n\nSomething to keep in mind when using features that rewrite the history of your Git repository: If you already\npushed your previous commits to a *remote repository* you will have to use `git push --force` to overwrite the\nhistory of the *remote repository*. 
Bad use of this could cause serious problems, so be careful!\nIf you run into trouble, a useful guide that could help you recover from this is [git push --force and how to deal with it](https://evilmartians.com/chronicles/git-push",[9,918,743],{"slug":1656,"featured":6,"template":700},"start-using-git","content:en-us:blog:start-using-git.yml","Start Using Git","en-us/blog/start-using-git.yml","en-us/blog/start-using-git",{"_path":1662,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1663,"content":1669,"config":1675,"_id":1677,"_type":14,"title":1678,"_source":16,"_file":1679,"_stem":1680,"_extension":19},"/en-us/blog/test-all-the-things-gitlab-ci-docker-examples",{"title":1664,"description":1665,"ogTitle":1664,"ogDescription":1665,"noIndex":6,"ogImage":1666,"ogUrl":1667,"ogSiteName":686,"ogType":687,"canonicalUrls":1667,"schema":1668},"Test all the things in GitLab CI with Docker by example","Running tests is easier than you think – guest author Gabriel Le Breton shares his presentation about testing everything automatically with GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680261/Blog/Hero%20Images/test-all-the-things-in-gitlab-ci-with-docker-by-example.jpg","https://about.gitlab.com/blog/test-all-the-things-gitlab-ci-docker-examples","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Test all the things in GitLab CI with Docker by example\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabriel Le Breton\"}],\n        \"datePublished\": \"2018-02-05\",\n      }",{"title":1664,"description":1665,"authors":1670,"heroImage":1666,"date":1672,"body":1673,"category":741,"tags":1674},[1671],"Gabriel Le Breton","2018-02-05","\n\nDo you write tests? Or do you skip them because it’s too complicated to run? Or maybe developers on your team just don’t care? You should take a few minutes and set up CI so you can enforce good practices. 
Good news, you can test [all the things](http://knowyourmeme.com/memes/all-the-things) automagically in [GitLab CI/CD](/solutions/continuous-integration/) with Docker and very little effort 🤘\n\n\u003C!-- more -->\n\nI recently gave a presentation at the [SagLacIO](http://saglac.io/) about [GitLab CI/CD](/solutions/continuous-integration/).\n\n## Getting started\n\nFirst, you’ll need an account at [GitLab.com](https://gitlab.com/). If you don’t already have one, you can open an account with no problem. [GitLab’s free tier](/stages-devops-lifecycle/) gives you a ton of features, unlimited free hosted repositories, 2,000 CI build minutes per month, etc. You can even use your own task runners in case you bust that limit.\n\n### Useful links\n\n- [GitLab.com](https://gitlab.com/)\n- [GitLab CI/CD documentation](https://docs.gitlab.com/ee/ci/) 📗\n- [.gitlab-ci.yml documentation](https://docs.gitlab.com/ee/ci/yaml/) 📕\n- [.gitlab-ci.yml linter: gitlab.com/ci/lint](https://gitlab.com/ci/lint/) ✅\n- [gitlab-ci nodejs example project](https://gitlab.com/gableroux/gitlab-ci-example-nodejs)\n- [gitlab-ci Docker example project](https://gitlab.com/gableroux/gitlab-ci-example-docker)\n- [gitlab-ci django example project](https://gitlab.com/gableroux/gitlab-ci-example-django)\n- [Unity3D Docker project](https://gitlab.com/gableroux/unity3d) running in gitlab-ci and published to [Docker Hub](https://hub.docker.com/r/gableroux/unity3d/)\n- [How to publish Docker images to Docker Hub from gitlab-ci on Stack Overflow](https://stackoverflow.com/questions/45517733/how-to-publish-docker-images-to-docker-hub-from-gitlab-ci)\n\n## Here go the slides\n\nScroll through the slides from my presentation on GitLab CI/CD at SagLacIO, you’ll have fun 🤘\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://docs.google.com/presentation/d/10835yig54EbR_OQcxSXURkPk_0zkhLxaWHdRdXb-yWw/embed?start=false&amp;loop=false&amp;delayms=3000\" frameborder=\"0\" width=\"1280\" height=\"749\" 
allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\nIf you have suggestions, feel free to poke me or [open an issue](https://github.com/GabLeRoux/gableroux.github.io/issues).\n\n *[Test all the things in GitLab CI with Docker by example](https://gableroux.com/saglacio/2018/01/16/test-all-the-things-in-gitlab-ci-with-docker-by-example/) was originally published on gableroux.com.*\n\n *Cover photo by [Federico Beccari](https://unsplash.com/photos/ahi73ZN5P0Y?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)*\n {: .note}\n",[9,109],{"slug":1676,"featured":6,"template":700},"test-all-the-things-gitlab-ci-docker-examples","content:en-us:blog:test-all-the-things-gitlab-ci-docker-examples.yml","Test All The Things Gitlab Ci Docker Examples","en-us/blog/test-all-the-things-gitlab-ci-docker-examples.yml","en-us/blog/test-all-the-things-gitlab-ci-docker-examples",{"_path":1682,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1683,"content":1689,"config":1693,"_id":1695,"_type":14,"title":1696,"_source":16,"_file":1697,"_stem":1698,"_extension":19},"/en-us/blog/thelastmile-gitlab",{"title":1684,"description":1685,"ogTitle":1684,"ogDescription":1685,"noIndex":6,"ogImage":1686,"ogUrl":1687,"ogSiteName":686,"ogType":687,"canonicalUrls":1687,"schema":1688},"Inside the collaboration between GitLab and The Last Mile","GitLab teamed up with The Last Mile to bring open source DevOps and tech mentorship to incarcerated populations across the United States.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681743/Blog/Hero%20Images/tlm-blogpost-banner.png","https://about.gitlab.com/blog/thelastmile-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside the collaboration between GitLab and The Last Mile\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Christina Hupy, Ph.D.\"}],\n        \"datePublished\": \"2020-11-13\",\n      }",{"title":1684,"description":1685,"authors":1690,"heroImage":1686,"date":1389,"body":1691,"category":694,"tags":1692},[1119],"\n\n[The Last Mile (TLM)](https://thelastmile.org/), an organization focused on changing lives through technology, is tackling the daunting problem of mass incarceration in the United States by providing education and career training opportunities to incarcerated individuals to help break the generational cycle of incarceration. GitLab team members with similar passions and ideas connected with The Last Mile team and built a partnership to help bring the tech industry and mentorship directly to incarcerated individuals.\n\n## AMA to Coffee Chat to Partnership\n\nThe idea for TLM partnership originated during an AMA (or \"Ask Me Anything\" session) between GitLab CEO, [Sid Sijbrandij](/company/team/#sytses), and GitLab team members. [In one of these AMAs](https://www.youtube.com/watch?v=qi9zrymBO8o), [Tucker Logan](/company/team/#tuckcodes), a federal solutions architect at GitLab, asked Sid about the inspiration behind his [tweet](https://twitter.com/sytses/status/1227319454817804288) about mass incarceration. In a follow-up question, [Morgen Smith](/company/team/#msmith6), a sales development representative (SDR) for the Americas, asked Sid if GitLab would consider creating initiatives to help combat the school-to-prison pipeline.\n\nAs a former educator, Morgen has witnessed first-hand the national trend of disadvantaged youth being agressively disciplined in schools, which can then lead to juvenile offenses and later to formal charges. 
During the AMA, Morgen asked Sid: \"What do you think GitLab could do to encourage minority youth in this situation to be inspired by opportunities in tech?\" Sid shared his support and passion for the topic, and invited Morgen and Tyler to host an [open coffee chat](/company/culture/all-remote/informal-communication/#coffee-chats) on the topic to brainstorm ideas and next steps.\n\nDuring the coffee chat, Sid decided to take the smallest step, first. He visited San Quentin State Prison in San Rafael, Calif., and organized a call with Chris Redlitz, a co-founder of TLM. It turns out that TLM was using GitLab internally and also using the GitLab Community Edition to train nearly 300 students participating in their programs about how to use DevOps.\n\nTLM is a nonprofit program that started at San Quentin. TLM works with the incarcerated populations at men’s, women’s, and young adult correctional facilities to help them build relevant skills in technology with the goal of preparing individuals for successful reentry and building careers in business and technology. Today, TLM is in 23 classrooms across six states and has served 622 students since its inception.\n\n## TLM students learn DevOps with GitLab\n\nParticipants in TLM use the self-managed, free open core version of GitLab in their courses on Web Development. Each of the twenty individual classrooms have their own self-managed instance which around 20 students use to create and host their own private repositories. The sandbox environments are deployed centrally via Google Cloud. The core curriculum includes HTML/CSS and JavaScript, Node.js, Express.js, React.js, and Mongodb. GitLab is used primarily as a [source code management tool](/solutions/source-code-management/) for the students. Students write and commit code to personal repositories during course assignments. 
TLM Remote Instruction team also manages student-facing GitLab repositories to demonstrate industry best practices in merging, code collaboration, and version control platforms. Additionally, TLM leverages GitLab by providing students access to their repositories after they are released from prison, preserving commit history and all version control for the aspiring coders.\n\n\"By utilizing GitLab, The Last Mile students become comfortable using a best-in-class open source DevOps tool,\" says Tulio Cardozo, IT Manager, TLM. \"This experience empowers our students as aspiring software engineers, enabling them to enter the workforce with the collaboration and communication framework skills employers demand.\"\n\nThe GitLab team is partnering with the TLM Programs department to organize a series of webinars and workshops for the students. The first webinar kicked off in June of 2020 and was broadcast to 27 students (men, women, and youth programs), across four classrooms in several states. The topic was an introduction to GitLab and DevOps. Sid joined and shared the story of founding GitLab and his journey in tech. [Brendan O’Leary](/company/team/#brendan), a senior developer evangelist at GitLab, provided an overview of DevOps and explained how GitLab is the first single application for the entire DevOps lifecycle.\n\n\"The students appreciated the information on how to get started as new developers. Sid and Brendan helped the students believe they could accomplish anything with enough hard work,\" says a classroom facilitator from the Pendleton Youth Correctional Facility in Indiana.\n\nThe TLM team added that the webinar exposed students to a large company that works remotely and introduced them to an industry-recognized brand that the students use. 
In addition to the value of the content itself, there was a Q&A portion of the session where the students asked questions about the technology itself, such as how to start an open-source project and protecting intellectual property in open source, and about the facilitators' personal journey into tech.\n\nWatch the webinar with GitLab and TLM below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/ejHmvMjXJVU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nIn addition to the general workshop, the teams also collaborated on more technical content. The students at the Pendleton Juvenile Correctional Facility had a very special guest visit their [Web Development Fundamentals Course](https://thelastmile.org/our-work/), [Natalia Tepluhina](/company/team/#ntepluhina). Natalia, who currently lives in the Ukraine, is a frontend engineer at GitLab and also serves as a [core Vue.js team member](https://vuejs.org/v2/guide/team.html) and [core team member](/community/core-team/) of GitLab itself. Natalia answered a variety of questions about how to approach learning Javascript and provided a few demos related to specific questions from the students.\n\n## Mentorship for a career in DevOps\n\nGitLab and TLM also partnered on a series of Technical recruiting workshops with the classrooms. These have definitely been one of the highlights of the partnership thus far. In these workshops, a GitLab recruiter gave a presentation on the technical recruiting processes at GitLab, best practices during the application process and interview process, as well as an overview of what to expect during an interview. 
During each of the four sessions, the recruiters directly engaged with the participants, who asked a variety of questions, including:\n\n* How do I address incarceration on my resume?\n* What about background checks?\n* How do I gain professional experience while incarcerated?\n\nThe GitLab recruiting team was very sensitive to the participants' concerns and provided honest, clear answers, and great suggestions. The recruiters shared that during the process candidates should think of their recruiter as a resource, and they can always ask to speak to the People team at GitLab in confidence if it would help reassure them with any concerns they have regarding their criminal records. The recruiters encouraged the students to highlight their work in TLM courses on their resume and think about whether they can use course projects to start to build a portfolio. In addition, the facilitators encouraged participants to think about contributing to open source projects as a way to build technical skills, increase their network and mentorship opportunities.\n\n## How can open source help incarcerated populations gain experience in tech?\n\nThe discussion around contributing to open source projects as a way to build technical skills sparked a few different exciting ideas with the teams. One of these ideas was to hold a first time contributor workshop with alumni from TLM. The workshop was held in September 2020 and had 16 alumni participants, four GitLab team members, including Sid, and five TLM team members. The workshop covered the basics on how to contribute to GitLab and demonstrated the step-by-step process. Participants were [provided an issue](https://gitlab.com/gitlab-org/gitlab/-/issues/247284) with a list of simple fixes with the label [\"good-for-new-contributors\"](https://gitlab.com/groups/gitlab-org/-/labels?utf8=%E2%9C%93&subscribed=&search=good+for+new+contributors) in the GitLab docs or handbook with typos or other minor changes. 
We had a few merge requests after just a few hours of the workshop! Participants were encouraged to tag GitLab team members for recognition and to win a pair of tanuki socks – by the end of the week we had given away six pairs of socks.\n\nParticipants and instructors appreciated the opportunity to learn in a hands-on way during the workshop:\n\n\"Thank you for the opportunity to participate in the GitLab workshop. I am so grateful to the GitLab staff for taking the time to introduce those of us who are new to GitLab to the history and functionality of the company. I learned so much, not just about how I can utilize GitLab to accomplish personal tasks more efficiently, but also how I can contribute and collaborate more with others and contribute to my local and global communities.\" - TLM staff and alumna.\n\nThe GitLab team found the experience equally rewarding. \"Working with The Last Mile was such a rewarding experience! When I think about how our product takes in contributions from all over the world and knowing it is also leveraged by those currently and or previously incarcerated really shows how truly 'inclusive' Git can be. Additionally, the empowerment it offers and the gift of knowledge and skill that can't be taken away is invaluable,\" says [Candace Brydsong Williams](/company/team/#cwilliams3), manager of the Diversity, Inclusion and Belonging program at GitLab.\n\n## How TLM uses GitLab technology\n\nGitLab also provides free licenses of our top-tier hosted application for the TLM team, who use our DevOps technology in nearly every aspect of their operations.\n\nTLM transitioned from GitHub to GitLab in 2019 after we provided the licenses. Initially, GitLab was used primarily in TLM's engineering department to track all internal processes with issues and Wikis. Infrastructure as code data and internal information is stored in repositories. 
Soon, TLM adopted GitLab technology in their education and programs departments, where it is now being used for project management. TLM now uses sprint planning, milestones, issues, priority levels, burndown charts, and issues boards to streamline project management across their departments.\n\nThe Last Mile has introduced numerous new and distinct use cases for GitLab. These include:\n\n* Issues are used to manage classroom facilities including to keep track of the impacts of COVID-19 on each classroom. For example, status updates are recorded on the issue and in the comments.\n* [The Last Mile’s reentry program](https://thelastmile.org/our-work/#reentry) uses GitLab to track returned citizen onboarding and service delivery process as well as tracking internal workloads, task efforts, and collaboration across teams. To-do lists are used to manage actions and labels are used to view the status of various efforts.\n\n\"The GitLab platform provides The Last Mile with a remarkable range of solutions -- from our application of GitOps workflows for managing our hybrid infrastructure, to our org-wide application of issues across teams,\" says Mike Bowie, Director of Engineering, The Last Mile. 
\"By solving such a broad range of our needs, GitLab enables us to focus on delivering value into our programs, instead of administering and maintaining a plethora of disparate tools.\"\n",[697,9,807,1411,831],{"slug":1694,"featured":6,"template":700},"thelastmile-gitlab","content:en-us:blog:thelastmile-gitlab.yml","Thelastmile Gitlab","en-us/blog/thelastmile-gitlab.yml","en-us/blog/thelastmile-gitlab",{"_path":1700,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1701,"content":1707,"config":1712,"_id":1714,"_type":14,"title":1715,"_source":16,"_file":1716,"_stem":1717,"_extension":19},"/en-us/blog/three-teams-left-jenkins-heres-why",{"title":1702,"description":1703,"ogTitle":1702,"ogDescription":1703,"noIndex":6,"ogImage":1704,"ogUrl":1705,"ogSiteName":686,"ogType":687,"canonicalUrls":1705,"schema":1706},"3 Teams left Jenkins: Here’s why","How three different teams – Alteryx, ANWB, and EAB – shifted away from Jenkins for smoother sailing with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671932/Blog/Hero%20Images/jenkins-to-gitlab-sailboat.jpg","https://about.gitlab.com/blog/three-teams-left-jenkins-heres-why","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 Teams left Jenkins: Here’s why\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein Matturro\"}],\n        \"datePublished\": \"2019-07-23\",\n      }",{"title":1702,"description":1703,"authors":1708,"heroImage":1704,"date":1709,"body":1710,"category":804,"tags":1711},[894],"2019-07-23","\nAs many companies know, continuous integration and build processes are challenging. Complex tool\nintegrations, pieced-together pipelines, and overall system breakdowns are time consuming for\neven the most experienced teams. The longer it takes for system recovery, the more costly it\nbecomes, creating more risk for the organization as a whole. 
Competitive companies are always on\nthe lookout for better solutions and they're increasingly turning to GitLab to do just that.\n\nThree companies – Alteryx, ANWB, and EAB – all experienced unique challenges with Jenkins.\nWe highlight how each of these teams made the successful move to\n[GitLab from Jenkins](/solutions/jenkins/). Learn how each team\naccelerated deployment, improved CI/CD pipelines, created developer transparency, and\nalleviated toolchain stressors after making the switch to GitLab.\n\n## Alteryx: Builds down from 3 hours to 30 minutes\n\nAlteryx, a prominent end-to-end analytics platform, was using a legacy system with Jenkins\nthat was older, clunky, and difficult to manage. The team was looking to modernize their architecture\nand to improve their overall software development lifecycle.\n\nThey turned to GitLab because it offers many solutions in one tool. With GitLab, the Alteryx team is now\ncapable of managing source code, CI/CD, code reviews, and security scanning all in one place.\nA build that took three hours with Jenkins is now just 30 minutes in GitLab.\n\nAs Alteryx continues to grow in the analytics space, GitLab will continue to add new features\nto support the company's expanding needs. Learn more about [Alteryx’s journey](/customers/alteryx/).\n\n## ANWB: Increased deployments\n\nWith over 4.4 million members, ANWB offers services for credit cards, bicycle maintenance,\ncar sales, and travel throughout the Netherlands. Both the mobile and web development\nteams have their hands full with popular offerings like mapping and driver intelligence services.\n\nANWB was struggling with an outdated toolchain that included Jenkins version 1 as a build server.\nThe company wanted to speed up development, eliminate isolated and outdated processes and give\nits teams autonomy.\n\nWith GitLab, ANWB can now manage separate teams, increase deployments, and support a culture\nwhere everyone contributes freely to colleagues' code repositories. 
ANWB has plans to move toward a\ncloud-centric framework and GitLab has helped to pave that road. Learn more about [ANWB’s path to success](/customers/anwb/).\n\n## EAB: \"Quality first\" culture\n\nServing over 1,500 schools, colleges, and universities, EAB uses data analytics and transformative\nmeasures to help students stay enrolled in education. The EAB team had to rely on several tools,\nincluding Jenkins, which made continuous integration overly complex and time consuming.\nDevelopers wanted to consolidate their various tools to create faster builds with much less maintenance.\n\nEAB initially turned to GitLab because of our regular feature releases and [tiered (and affordable) pricing](/pricing/).\nThe EAB development team soon realized they could have a steady pace of\nbuild releases without having to use multiple tools to make it happen. In just six months, workflow increased\nand the company plans to continue to roll out a \"quality first\" culture using GitLab as a guide.\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nWatch the [Migrating from Jenkins to GitLab](https://www.youtube.com/watch?v=RlEVGOpYF5Y) demo\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\nCover image by [Fab Lentz](https://unsplash.com/@fossy) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,1534,109],{"slug":1713,"featured":6,"template":700},"three-teams-left-jenkins-heres-why","content:en-us:blog:three-teams-left-jenkins-heres-why.yml","Three Teams Left Jenkins Heres 
Why","en-us/blog/three-teams-left-jenkins-heres-why.yml","en-us/blog/three-teams-left-jenkins-heres-why",{"_path":1719,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1720,"content":1726,"config":1731,"_id":1733,"_type":14,"title":1734,"_source":16,"_file":1735,"_stem":1736,"_extension":19},"/en-us/blog/using-gitlab-to-manage-house-renovation-priorities",{"title":1721,"description":1722,"ogTitle":1721,"ogDescription":1722,"noIndex":6,"ogImage":1723,"ogUrl":1724,"ogSiteName":686,"ogType":687,"canonicalUrls":1724,"schema":1725},"Using GitLab to project manage home renovation priorities","Solutions Architect Brendan O'Leary shares how he and his family use GitLab Issue Boards for an unconventional purpose: home improvement prioritization!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680235/Blog/Hero%20Images/home-improvement.jpg","https://about.gitlab.com/blog/using-gitlab-to-manage-house-renovation-priorities","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using GitLab to project manage home renovation priorities\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2018-02-08\",\n      }",{"title":1721,"description":1722,"authors":1727,"heroImage":1723,"date":1728,"body":1729,"category":301,"tags":1730},[1326],"2018-02-08","\n\nLast summer my wife and I bought a new house for our ever-growing family. Before we moved in, we had a couple of improvements made – wood floors to replace the aging carpet in the master bedroom, some required structural fixes. However, when we bought the house, we knew there would be a lot more we wanted to do over the years. 
When it came to organizing those ideas into things that need to happen sooner rather than later and those that could wait, however, we found ourselves struggling to keep all of the plans in order.\n\n\u003C!-- more -->\n\n## Trying to get organized\n\nI've been able to complete a few other projects since we moved in – but most were small in scale. A built-in shelf wall for my wife's office, painting and staining the new deck, and of course a DIY standing desk to use in my new office kitchen (which is also the house's kitchen... [working from home for the win!](/company/culture/all-remote/)). These projects were great, but we needed a way to organize and prioritize larger renovation projects.\n\n![Home improvement examples](https://about.gitlab.com/images/blogimages/home-improvement-examples.png){: .shadow}\n\n*\u003Csmall>Clockwise, from left: built-in shelf wall, painted and stained deck, DIY standing desk\u003C/small>*\n\nI was a GitLab user for years before I even became a GitLab team-member. I've always hosted my side-project code in GitLab.com since GitLab offers [unlimited private repositories](/pricing/#gitlab-com) for free. For project management in my \"day job\" I've used dozens of other tools outside of GitLab, so when I joined it was the first time I saw the full breadth of what GitLab offers in issue management.\n\nIn thinking about the other tools I've used in the past, they didn't seem to meet the full bar of what I was looking for to solve our problem. As a mother of four young children, my wife is always on the go... but I'm on a computer all day long. So we needed something that worked seamlessly between platforms. We also needed to be able to easily re-arrange and re-prioritize items. Also, I fancy myself a bit of a DIY-er, so I wanted to be able to label some items as at least *possible* for me to maybe complete myself. 
All of these requirements had me wondering what tool would be best for my wife and me to collaborate on.\n\n## Enter GitLab Issue Boards\n\nWith these requirements, and my newfound GitLab knowledge, I was able to come up with a novel solution to the problem we were having: why not use a [GitLab Issue Board](/stages-devops-lifecycle/issueboard/) to manage our ever-changing home renovation priorities?\n\nWith Issue Boards, we would have a fantastic solution for mobile and desktop (shout out to the [GitLab UX team](https://docs.gitlab.com/ee/development/ux_guide/)!). With [labels](https://docs.gitlab.com/ee/user/project/labels.html), I could organize and group issues however we wanted. And the customizable columns would allow us to prioritize, track and manage the various issues and ideas.\n\n## How the board works\n\nTo start, I [created a new group on GitLab.com](https://gitlab.com/groups/new) to house (pun intended) everything for our family. I made a project in that group called `priorities` to be the central place to collect all the renovation ideas we had. In the future, I may have a project for a specific renovation, managing purchases, and contractors, etc.\n\nAs with every GitLab project, issues and issue boards were baked right in. I started adding issues right away – beginning with those that were at the top of mind, like the water heater that is at the end of its usable life, repairs to our front entryway, and window replacement.  
My wife didn't have a GitLab.com account yet, but it was easy to add her to the project as a member just by putting her email address in on the member's page, allowing her to sign up and get access to the project in one step.\n\n![Invite member by e-mail](https://about.gitlab.com/images/blogimages/invite-member-by-email.png){: .shadow}\n\nTo get organized, I created a few labels: `P1` for top priority items, `DIY Possibility` for those I might be able to tackle on my own, and `Furniture` for those that involved furnishing various rooms. The labels will help filter issues so that if I find a free weekend, I can search for `DIY Possibility` issues to maybe get started on. Or if we go to a furniture store, we could filter to those issues to get an idea of cost while we are there.\n\nFor the board columns, I decided to use `P1` as the first column after Backlog to highlight those issues. From there, it's a matter of agreeing on an organization of priority 😃\n\n![Home improvement issue board](https://about.gitlab.com/images/blogimages/home-improvement-issue-board.png){: .shadow}\n\n## Where to go next\n\nNow it's time to execute! One thing we didn't account for in the first iteration was the scope of issues. Some things were relatively minor regarding time and investment. Others (like replacing all 27 windows!) are larger projects for which we need to budget. For this, we will be using [issue weight](https://docs.gitlab.com/ee/user/project/issues/issue_weight.html) to understand how different projects align with budget and time investment to pull off.\n\nIt's been an exciting experience using GitLab Issue Boards for something outside of the development space. We'd love to hear from you too about \"non-standard\" uses for GitLab's features. 
Feel free to comment on this post or tweet us [@GitLab](https://twitter.com/gitlab).\n\n*Cover photo by [George Pastushok](https://unsplash.com/photos/d0yNnTEjEWY?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)*\n{: .note}\n",[1411,9],{"slug":1732,"featured":6,"template":700},"using-gitlab-to-manage-house-renovation-priorities","content:en-us:blog:using-gitlab-to-manage-house-renovation-priorities.yml","Using Gitlab To Manage House Renovation Priorities","en-us/blog/using-gitlab-to-manage-house-renovation-priorities.yml","en-us/blog/using-gitlab-to-manage-house-renovation-priorities",{"_path":1738,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1739,"content":1745,"config":1751,"_id":1753,"_type":14,"title":1754,"_source":16,"_file":1755,"_stem":1756,"_extension":19},"/en-us/blog/verizon-customer-story",{"title":1740,"description":1741,"ogTitle":1740,"ogDescription":1741,"noIndex":6,"ogImage":1742,"ogUrl":1743,"ogSiteName":686,"ogType":687,"canonicalUrls":1743,"schema":1744},"Verizon cuts datacenter rebuilds from 30 days to 8 hours","Verizon utilized microservices, automation, and GitLab to reduce datacenter rebuilds to under 8 hours.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678933/Blog/Hero%20Images/verizon_video_blog.jpg","https://about.gitlab.com/blog/verizon-customer-story","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Verizon Connect reduced datacenter rebuilds from 30 days to under 8 hours with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kim Lock\"}],\n        \"datePublished\": \"2019-02-14\",\n      }",{"title":1746,"description":1741,"authors":1747,"heroImage":1742,"date":1748,"body":1749,"category":694,"tags":1750},"How Verizon Connect reduced datacenter rebuilds from 30 days to under 8 hours with GitLab",[801],"2019-02-14","\nIn 2016, the [Verizon 
Connect](https://www.verizonconnect.com/) Telematics Container Cloud Platform team was struggling with data center\nbuilds that took 30 days. Working with legacy systems that included Java-based, monolithic\napplications, they also had a variety of disparate tools including BitBucket, Jenkins, and Jira\nin use throughout their environment.\n\n### Starting from scratch to move to microservices and increase automation\n\nThe group looked to move to a [microservices architecture](/blog/strategies-microservices-architecture/) to improve deploy speed and increase\nautomation. They also wanted to overcome manual errors, disjointed processes, and\nmanual deploys. \"We were just spending too much time doing stuff manually, so we decided\nto just start fresh and write everything from scratch,\" says Mohammed Mehdi, Principal DevOps, Verizon.\n\nAs they created this new infrastructure, they kept four key components in mind: architecture,\nautomation, extensibility, and being proactive and prepared for the future. They wanted to rebuild\ntheir data centers in less than 12 hours, instead of 30 days. They had a goal of 100 percent CI/CD.\nThey wanted to remove manual deployments, especially around the server and network deployments.\nThe team also focused on avoiding vendor lock-in by seeking open source tools to help them accomplish these goals.\n\nThe team looked to improve automation by focusing on simplification, standardization, and providing end-to-end visibility.\n\"We wanted easily repeatable, with zero-touch, zero-downtime deployments, automated tracking,\" Mehdi explains.\n\n### A single solution to meet their needs\n\nThe team chose GitLab to support this infrastructure initiative because it met a number of their qualifications, including being open source and offering Windows support. 
The team liked that it is easy to use and the UI easy to understand.\n\n\"Some of the other features that we really loved, and we didn’t find with any other CI/CD tool, are the project management\nfeatures,\" Mehdi says. \"GitLab replaced a bunch of disparate systems for us like Jira, BitBucket, and Jenkins. GitLab\nprovided us with a one-stop solution.\"\n\nThe Verizon Connect Telematics Container Cloud Platform team is using GitLab for:\n\n- [Code review](/blog/demo-mastering-code-review-with-gitlab/)\n- [CI/CD](/solutions/continuous-integration/)\n- [Issue tracking](/pricing/feature-comparison/)\n- [Source Code Management](/solutions/source-code-management/)\n- [Audit Management](https://docs.gitlab.com/ee/administration/audit_events.html)\n- [ChatOps](https://docs.gitlab.com/ee/ci/chatops/)\n\nThe team has successfully achieved deployment flexibility and are platform agnostic. They now have\nstreamlined processes and developers can truly focus on differentiating tasks.\n\nThe team was able to reduce their complete datacenter deploy\nprocess to under eight hours because of the streamlined deploy and build processes\nthey enabled using GitLab. 
Learn how [Verizon Connect](https://www.verizonconnect.com/) is achieving this success by watching\nmore about their story and how they achieved their targets in [the YouTube video](https://youtu.be/zxMFaw5j6Zs) below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/zxMFaw5j6Zs\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThanks for giving GitLab a shot, Verizon Connect!\n\nCover image by [chuttersnap](https://unsplash.com/@chuttersnap) on [Unsplash](https://unsplash.com)\n{: .note}\n",[833,109,830,9,918],{"slug":1752,"featured":6,"template":700},"verizon-customer-story","content:en-us:blog:verizon-customer-story.yml","Verizon Customer Story","en-us/blog/verizon-customer-story.yml","en-us/blog/verizon-customer-story",{"_path":1758,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1759,"content":1765,"config":1771,"_id":1773,"_type":14,"title":1774,"_source":16,"_file":1775,"_stem":1776,"_extension":19},"/en-us/blog/vuejs-app-gitlab",{"title":1760,"description":1761,"ogTitle":1760,"ogDescription":1761,"noIndex":6,"ogImage":1762,"ogUrl":1763,"ogSiteName":686,"ogType":687,"canonicalUrls":1763,"schema":1764},"How to use GitLab CI/CD for Vue.js","Learn how to get the most out of GitLab CI/CD with this guide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680363/Blog/Hero%20Images/build-test-deploy-vue.jpg","https://about.gitlab.com/blog/vuejs-app-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab CI/CD for Vue.js\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Simon Tarchichi\"}],\n        \"datePublished\": \"2017-09-12\",\n      
}",{"title":1760,"description":1761,"authors":1766,"heroImage":1762,"date":1768,"body":1769,"category":741,"tags":1770},[1767],"Simon Tarchichi","2017-09-12","Continuous Integration allows you to:\n\n\n- Deploy your app instantly, when new code is pushed into a repo\n\n- Build your app (in our case `npm run build`)\n\n- Trigger test scripts (and block deployment if a test fails)\n\n\nIt is definitely worth the effort if you update your app regularly.\n\n\nGitLab is a service that started as an open-source GitHub competitor, mostly\nto host code in Git repositories, and evolved into an amazing tool that I\nwon’t introduce here, as it isn’t related to Vue.js. One thing though, they\nwere one of the first major companies to use Vue.js for their user\ninterface.\n\n\nDocker has to be mentioned as well. It is the most popular containerization\nservice. It basically means you get to execute code in a secure environment,\nconfigured exactly like your dev/prod. Very useful when you need to make\nsure your code is executed with all its dependencies.\n\n\nEach of these tools would require many posts to be covered. We’ll focus on\nsetting up [CI/CD](/topics/ci-cd/) for your Vue.js project. We’ll assume you\nhave no knowledge in the matter.\n\n\n[GitLab CI/CD is free for personal projects](/pricing/#gitlab-com), I don’t\nknow any other tool with such a beautiful UI that does that. If you do,\nplease let me know.\n\n\n### The .gitlab-ci.yml file\n\n\nCreate a `.gitlab-ci.yml` file at the root of your repo. GitLab will check\nfor this file when new code is pushed. If the file is present, it will\ndefine a [pipeline](https://docs.gitlab.com/ee/ci/pipelines/index.html),\nexecuted by a [GitLab Runner](http://docs.gitlab.com/runner/). Click the\nlinks if you are curious, or keep reading to see a working example.\n\n\nDefault stages of a pipeline are:\n\n\n1. build\n\n1. test\n\n1. 
deploy\n\n\nAgain, you don’t need to master this, but this is the most common use case.\nYou may not have set up unit tests, and if you haven’t, you may remove this\nstep from the file, GitLab won’t mind.\n\n\nHere is our file, you may copy/paste it in your repo:\n\n\n```\n\nbuild site:\n  image: node:6\n  stage: build\n  script:\n    - npm install --progress=false\n    - npm run build\n  artifacts:\n    expire_in: 1 week\n    paths:\n      - dist\n\nunit test:\n  image: node:6\n  stage: test\n  script:\n    - npm install --progress=false\n    - npm run unit\n\ndeploy:\n  image: alpine\n  stage: deploy\n  script:\n    - apk add --no-cache rsync openssh\n    - mkdir -p ~/.ssh\n    - echo \"$SSH_PRIVATE_KEY\" >> ~/.ssh/id_dsa\n    - chmod 600 ~/.ssh/id_dsa\n    - echo -e \"Host *\\n\\tStrictHostKeyChecking no\\n\\n\" > ~/.ssh/config\n    - rsync -rav --delete dist/ user@server.com:/your/project/path/\n  ```\n\n### Test our file\n\n\nNow commit and push the `.gitlab-ci.yml` file to your GitLab repo.\n\n\nHere is how it will look in the Pipelines tab of GitLab UI:\n\n\n![GitLab CI/CD\nPipelines](https://about.gitlab.com/images/blogimages/gitlab-ci-pipelines.png){:\n.shadow}\u003Cbr>\n\n\nThe green checkmark indicates that the step has succeeded and you can see\nthe logs when clicking it.\n\n\nIn the second example, the tests have failed, click the red mark to read the\nlogs and understand what went wrong.\n\n\n![GitLab CI/CD\nlogs](https://about.gitlab.com/images/blogimages/gitlab-ci-failed.png){:\n.shadow}\u003Cbr>\n\n\n### File anatomy\n\n\n- `image` is the link to the Docker image. I have chosen to use public\nofficial images, but you may use one from the Docker Hub or a private\nregistry.\n\n\n- `stage` should be `build`, `test` or `deploy` if you use defaults. But\nthat [can be customized](https://docs.gitlab.com/ee/ci/yaml/stages).\n\n\n- `script` are command lines executed inside our build environment.\n\n\n- `artifacts` describes a path to the build result. 
The files in this path\ncan be used in the next build steps (in `deploy` in our example). You can\ndownload artifacts from Gitlab UI.\n\n\nMore about the `.gitlab-ci.yml` file options [in the\ndocs](https://docs.gitlab.com/ee/ci/yaml/).\n\n\n### About the deployment script\n\n\nI have described my use case here, but it may not be the simplest. Relevant\nexamples for [deployment to Amazon\nS3](/blog/ci-deployment-and-environments/) or other services can\nbe found online.\n\n\nTo get it working, you’ll need to **provide GitLab with a private SSH key**.\nIf you are no security expert, then it is time to take advice from one. The\nbottom line is **do not give it your private SSH key**, create one that is\nused only by GitLab.\n\n\n```\n\n# create gitlab user\n\nadduser gitlab\n\n\n# generate a DSA SSH key\n\nsu -l gitlab\n\nssh-keygen -t dsa\n\n\n# authorize the key to log in using the public key and output the private\none\n\ncd .ssh\n\nmv id_dsa.pub authorized_keys\n\ncat id_dsa && rm id_dsa\n\n```\n\n\nThen go to GitLab UI “Settings” (the gear icon), then “Variables” and\ncopy/paste the content of your terminal in “Value”. The “Key” should be\n`SSH_PRIVATE_KEY`. 
This private key will be used to do the `rsync`.\n\n\n![GitLab CI/CD\nvariables](https://about.gitlab.com/images/blogimages/gitlab-ci-variables.png){:\n.shadow}\u003Cbr>\n\n\n## Links\n\n\n- [Sample GitLab repository](https://gitlab.com/kartsims/vue-ci)\n\n- [Gitlab CI/CD docs](https://docs.gitlab.com/ee/ci/)\n\n\nIf you need more information, leave a comment I’ll be happy to help you if I\ncan.\n\n\n\"[Golden Gate Bridge Vista\nPoint](https://unsplash.com/@tigesphotos?photo=-BiEu8VP9-M)\" by [Tiger\nRobinson](https://unsplash.com/@tigesphotos) on Unsplash\n\n{: .note}\n",[109,9],{"slug":1772,"featured":6,"template":700},"vuejs-app-gitlab","content:en-us:blog:vuejs-app-gitlab.yml","Vuejs App Gitlab","en-us/blog/vuejs-app-gitlab.yml","en-us/blog/vuejs-app-gitlab",{"_path":1778,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1779,"content":1785,"config":1790,"_id":1792,"_type":14,"title":1793,"_source":16,"_file":1794,"_stem":1795,"_extension":19},"/en-us/blog/wag-labs-blog-post",{"title":1780,"description":1781,"ogTitle":1780,"ogDescription":1781,"noIndex":6,"ogImage":1782,"ogUrl":1783,"ogSiteName":686,"ogType":687,"canonicalUrls":1783,"schema":1784},"How Wag! cut their release process from 40 minutes to just 6","The popular dog-walking app is rolling out new features faster and with more confidence as they adopt GitLab for more of their DevOps workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678923/Blog/Hero%20Images/dog-walking.jpg","https://about.gitlab.com/blog/wag-labs-blog-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Wag! 
cut their release process from 40 minutes to just 6\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2019-01-16\",\n      }",{"title":1780,"description":1781,"authors":1786,"heroImage":1782,"date":1787,"body":1788,"category":694,"tags":1789},[1368],"2019-01-16","\nDo you own a dog and work outside of the home? If you do, or even just know someone who does, you know that finding a trustworthy caretaker is of the utmost importance. With dog walkers in cities and towns across the U.S., the folks at [Wag!](https://wagwalking.com/about) have proven to be a source of reliable caretakers for countless fur parents. In three years, the company has powered more than one billion walks via its app for on-demand dog walking, sitting, and boarding, that boasts of millions of users.\n\nWag! recently signed on with GitLab to make the most of their engineering hours and bring their customers new features and updates at a faster clip.\n\n### From version control, to CI, to the full pipeline\n\nHaving previously used GitLab as their main source of truth for repositories, Wag! initially planned to return to the app solely for [continuous integration (CI)](/solutions/continuous-integration/). But after giving it a whirl, they quickly expanded their strategy to include the use of other features.\n\n\"We started our GitLab project about seven or eight months ago,\" explains [Dave Bullock](https://www.linkedin.com/in/eecue), director of engineering at Wag! \"The original idea was to just use it as our CI platform. But as we built that out, we started using it for more and more tasks, and ended up using it for our full [CI/CD pipeline](/topics/ci-cd/). That includes both our application, so the CI/CD that powers the API, along with our infrastructure. We use GitLab with Terraform to test, review, save, and deploy all of our infrastructure as well as the application on two separate pipelines. 
Every team uses it in their application, whether it's the Android application, the web application, the API, or our infrastructure; it's all being tested, built, and deployed through GitLab.\"\n\n### Streamlining to a single application\n\nPart of GitLab's appeal stemmed from the [ability to do everything in one place](/topics/single-application/). Wag! was searching for an [integrated solution](/solutions/continuous-integration/) that would streamline their development process, and they found it in GitLab.\n\n\"We were previously using a combination of Travis and other random technologies, and we just wanted something with a little bit better interface, a little more control, and something that we owned as far as the hosting and the management,\" says Bullock. \"We really wanted to move towards a single, full-service application.\"\n\n>\"We just wanted something with a better interface, a little more control, and something that we owned as far as the hosting and the management. We really wanted to move towards a single, full-service application.\"\n\nThe impact of that choice is also being felt on the infrastructure side. Wag!'s infrastructure engineers no longer have to manually stage and test their work. They are now following the same basic workflow that is used for their app, while integrating Terraform to manage their infrastructure.\n\n\"Basically, one of our DevOps team members will make a change, cut a pull request, and it'll be reviewed by the team. If it looks good, we'll say, 'Okay, cool. Merge it into master,'\" Bullock explains. \"If it's one of the modules, we'll tag that module, update the reference to it, and then the CI pipeline will kick off. It'll test the syntax, look for any security issues, and alert a Slack channel if there are any. It'll then stage a full version of the environment and test it. 
So, it stages all the pieces: the database, cache, and everything else, and tests it all to make sure that it works, just like we would be testing our production website.\n\n\"If that passes, then it allows you to see what your changes are going to do before you apply them,\" he continues. \"We call it Terraform plan. So, it runs Terraform plan on each piece of our infrastructure, and it'll tell us something like, 'Hey, we see 34 changes and 2 destructions and 1 creation in this environment. Click here to review.' Then the group will review it and if it looks good, we'll apply it in production. Having that as a full pipeline is really great.\"\n\n>“Now it's so easy to deploy something and roll it back if there's an issue. It's taken the stress and the fear out of deploying into production.” – Dave Bullock, Director of Engineering\n\n### Easy learning curve\n\nSome of the Wag! engineers had working experience with GitLab, while others had not. Nonetheless, Bullock found the onboarding of his teams to be a fairly easy process due to the intuitive nature of the interface.\n\n\"I think once you kind of understand how CI works, it's basically about following things step by step,\" he says. \"Pipelines were a new concept to a lot of the team, but once you see it happening visually, it's really easy to understand what's going on, expand and add to it. It's a really useful interface. Seeing all those green dots or red dots makes it really clear what's going on.\"\n\n### Built-in security, shaving down test times and faster releases\n\nAs part of their ramp up in GitLab, the dog-walking service recently furled [automated security scanning and license management](/solutions/security-compliance/) into their workflow, with Bullock noting how \"great\" it is to have those features baked into the pipeline so that immediate action can be taken when needed.\n\nWag! currently issues three releases a day, with plans to bump that number up to eight or more. 
Since adopting GitLab, they have seen a massive improvement in the amount of time spent on the release process. **What previously took 40 minutes to an hour to accomplish, now takes just six minutes.**\n\n\"Traditionally, the release process was slow, fragile, and limited to only a few key release engineers who had access to 10 different systems to monitor, make changes, and log into to make updates and pull in the latest code. It was not optimal. Now it's literally a single pane of glass. A lot of it just happens automatically when you merge `develop` into `master` and tag it.\"\n\nThe release process time should improve even more once Wag! engineers switch from manually pushing parts of the release through to automating the process.\n\n\"Right now, we're still clicking through the interface and saying, 'Okay, do this, now let's monitor,'\" says Bullock. \"But I think as we become more comfortable with it, we'll go to fully automated deployments. Literally, just let it go and deploy. If we see an uptick in errors, we'll let it roll back on its own. But as it is now, it's so easy to deploy something and roll it back ourselves if there's an issue. It's taken the stress and the fear out of deploying into production.\"\n\n### Adopting DevOps\n\nWag!'s engineering team has big plans for 2019. They are currently in the process of moving their repositories from GitHub to GitLab and are planning to switch from Amazon ECS to [Kubernetes](/solutions/kubernetes/). This is all part of their roadmap to implementing DevOps.\n\n\"I think we're going to start working on the project in Q1 and it will be really awesome to have all the bells and functionality,\" Bullock says. \"We're excited about Auto DevOps and a lot of new things GitLab has coming down the pipeline. We're going to push pretty hard on that this year.\n\n\"I'm a big fan of DevOps in general, so I think the closer that you can bring the development engineers to the ops side, the better things work,\" he adds. 
\"I would love for every software engineer or backend engineer to take ownership of the environment that their code runs in, or at least be able to experiment with it and kind of instantly just spin up a full working environment that is the same as our production environment, which we do now, but not with Kubernetes. I think removing that friction is great.\"\n\n### Growing with GitLab\n\nGitLab's releases are a treat the folks at Wag! look forward to checking out each month. The rollout of new features, which are partly determined by user feedback, tend to correlate with the engineering needs of the growing dog-walking and boarding service.\n\n\"I think it's exciting that as we're growing and adding interesting pieces to our infrastructure and application, we're seeing GitLab grow with your monthly release cycles,\" says Bullock. \"Every month there's some new stuff that we're like, 'Oh cool, we could use that, that's perfect.' It's nice to have GitLab as a partner that's growing with us, and it's exciting to see the parallels of new features that you're launching and how it's solving our problems and optimizing things. 
There's all kinds of cool stuff, and every time we start using a new piece of GitLab, I feel like, 'Okay, that's great, we're really getting our money’s worth.'\"\n\nPhoto by [Andrii Podilnyk](https://unsplash.com/photos/dWSl8REfpoQ?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/dog-walk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[833,831,807,830,720,721,9,918],{"slug":1791,"featured":6,"template":700},"wag-labs-blog-post","content:en-us:blog:wag-labs-blog-post.yml","Wag Labs Blog Post","en-us/blog/wag-labs-blog-post.yml","en-us/blog/wag-labs-blog-post",{"_path":1797,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1798,"content":1804,"config":1811,"_id":1813,"_type":14,"title":1814,"_source":16,"_file":1815,"_stem":1816,"_extension":19},"/en-us/blog/we-need-to-talk-no-proxy",{"title":1799,"description":1800,"ogTitle":1799,"ogDescription":1800,"noIndex":6,"ogImage":1801,"ogUrl":1802,"ogSiteName":686,"ogType":687,"canonicalUrls":1802,"schema":1803},"We need to talk: Can we standardize NO_PROXY?","Subtle differences in proxy setting implementations led to surprising\nproblems for a GitLab customer. 
Here's how we got to the root of it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659507/Blog/Hero%20Images/AdobeStock_623844718.jpg","https://about.gitlab.com/blog/we-need-to-talk-no-proxy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We need to talk: Can we standardize NO_PROXY?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stan Hu\"}],\n        \"datePublished\": \"2021-01-27\",\n      }",{"title":1799,"description":1800,"authors":1805,"heroImage":1801,"date":1807,"updatedDate":1808,"body":1809,"category":741,"tags":1810},[1806],"Stan Hu","2021-01-27","2025-06-09","If you've used a Web proxy server before, you're probably familiar with\nthe environment variables `http_proxy` or `HTTP_PROXY`. You may be less\nfamiliar with `no_proxy`, which provides a way to exclude traffic\ndestined to certain hosts from using the proxy. While HTTP is a\nwell-defined standard, no standard exists for how clients should handle\nthese variables. As a result, Web clients support these variables in\nsubtly different ways. For one GitLab customer, these differences led\nto a weekend of troubleshooting that uncovered why certain services\nstopped communicating.\n\n## What is a proxy server?\n\nA proxy server acts as an intermediary between your computer or network and the internet. When you send a request to access a website or other online resource, that request first goes to the proxy server. The proxy server then forwards the request to the actual destination and delivers the response back to you. 
Proxies can serve various purposes, including improving security, enhancing privacy, and controlling internet usage.\n\n## Proxy server environment variables\n\nLet's now look at what proxy server environment variables are, and how to define exemptions and handle exclusions with `no_proxy`.\n\n### Understanding proxy server environment variables \n\nToday, most Web clients support connection to proxy servers via\nenvironment variables:\n\n- `http_proxy` / `HTTP_PROXY`\n- `https_proxy` / `HTTPS_PROXY`\n- `no_proxy` / `NO_PROXY`\n\nThese variables tell the client what URL should be used to access the\nproxy servers and which exceptions should be made. For example, if you\nhad a proxy server listening on `http://alice.example.com:8080`, you\nmight use it via:\n\n```sh\nexport http_proxy=http://alice.example.com:8080\n```\n\nWhich proxy server gets used if troublesome Bob also defines the\nall-caps version, `HTTP_PROXY`?\n\n```sh\nexport HTTP_PROXY=http://bob.example.com:8080\n```\n\nThe answer surprised us: it depends. In some cases, the Alice proxy\nwins, and in other cases Bob wins. We'll discuss the details later.\n\n### Defining proxy exemptions with `no_proxy`\n\nWhat happens if you want to make exceptions? For example, suppose you\nwant to use a proxy server for everything but `internal.example.com` and\n`internal2.example.com`. That's where the `no_proxy` variable comes into\nplay. Then you would define `no_proxy` as follows:\n\n```sh\nexport no_proxy=internal.example.com,internal2.example.com\n```\n\n### Handling IP exclusions in `no_proxy`\n\nWhat if you want to exclude IP addresses? Can you use asterisks or\nwildcards? Can you use CIDR blocks (e.g. `192.168.1.1/32`)? 
The answer\nagain: it depends.\n\n## How did we get here?\n\nLet's dig into the evolution of proxy variables, and how they are used today.\n\n### The origins of proxy variables\n\nIn 1994, most Web clients used CERN's `libwww`, which [supported `http_proxy` and the `no_proxy` environment variables](https://courses.cs.vt.edu/~cs4244/spring.09/documents/Proxies.pdf).\n`libwww` only used the lowercase form of `http_proxy`, and the [`no_proxy` syntax was\nsimple](https://github.com/w3c/libwww/blob/8678b3dcb4191065ca39caea54bb1beba809a617/Library/src/HTAccess.c#L234-L239):\n\n```\nno_proxy is a comma- or space-separated list of machine\nor domain names, with optional :port part.  If no :port\npart is present, it applies to all ports on that domain.\n\nExample:\n\t\tno_proxy=\"cern.ch,some.domain:8001\"\n```\n\nNew clients emerged that added their own HTTP implementations without\nlinking `libwww`. In January 1996, Hrvoje Niksic released\n`geturl`, the predecessor of what is now `wget`.  A month later,\n`geturl`, [added support for `http_proxy` in v1.1](https://ftp.sunet.se/mirror/archive/ftp.sunet.se/pub/www/utilities/wget/old-versions/).\nIn May 1996, `geturl` v1.3 added support for `no_proxy`. Just as with\n`libwww`, `geturl` only supported the lowercase form.\n\nIn January 1998, Daniel Stenberg released `curl` v5.1, which [supported the `http_proxy` and `no_proxy` variables](https://github.com/curl/curl/blob/ae1912cb0d494b48d514d937826c9fe83ec96c4d/CHANGES#L929-L944).\nIn addition, `curl` allowed the uppercase forms, `HTTP_PROXY` and `NO_PROXY`.\n\nPlot twist: In March 2009, [curl v7.19.4](https://github.com/curl/curl/releases/tag/curl-7_19_4) dropped support for the\nuppercase form of `HTTP_PROXY` [due to security concerns](https://github.com/curl/curl/blob/30e7641d7d2eb46c0b67c0c495a0ea7e52333ee2/lib/url.c#L2250-L2261). However, while `curl` ignores `HTTP_PROXY`, `HTTPS_PROXY` still works today.\n\n### State of the variables today\n\nFast-forward to today. 
As my [colleague Nourdin el Bacha researched](https://gitlab.com/gitlab-com/support/support-team-meta/-/issues/2991),\nwe can see that how these proxy server variables are handled varies, depending\non what language or tool you are using.\n\n## Current implementation of proxy variables across languages\n\nKnowing how proxy variables are handled across languages allows you to set them so that they work properly. Here’s a quick rundown.\n\n### `http_proxy` and `https_proxy`\n\nIn the following table, each row represents a supported behavior, while\neach column holds the tool (e.g. `curl`) or language (e.g. `Ruby`) to\nwhich it applies:\n\n|                 | curl      | wget           | Ruby          | Python    | Go        |\n|-----------------|-----------|----------------|---------------|-----------|-----------|\n| `http_proxy`    | Yes       | Yes            | Yes           | Yes       | Yes       |\n| `HTTP_PROXY`    | No        | No             | Yes ([warning](https://github.com/ruby/ruby/blob/0ed71b37fa9af134fdd5a7fd1cebd171eba83541/lib/uri/generic.rb#L1519)) | Yes (if `REQUEST_METHOD` not in env)       | Yes       |\n| `https_proxy`   | Yes       | Yes            | Yes           | Yes       | Yes       |\n| `HTTPS_PROXY`   | Yes       | No             | Yes           | Yes       | Yes       |\n| Case precedence | lowercase | lowercase only | lowercase     | lowercase | Uppercase |\n| Reference       | [source](https://github.com/curl/curl/blob/30e7641d7d2eb46c0b67c0c495a0ea7e52333ee2/lib/url.c#L2250-L2266) | [source](https://github.com/jay/wget/blob/099d8ee3da3a6eea5635581ae517035165f400a5/src/retr.c#L1222-L1239) | [source](https://github.com/ruby/ruby/blob/0ed71b37fa9af134fdd5a7fd1cebd171eba83541/lib/uri/generic.rb#L1474-L1543) | [source](https://github.com/python/cpython/blob/030a713183084594659aefd77b76fe30178e23c8/Lib/urllib/request.py#L2488-L2517) | 
[source](https://github.com/golang/go/blob/682a1d2176b02337460aeede0ff9e49429525195/src/vendor/golang.org/x/net/http/httpproxy/proxy.go#L82-L97) |\n\u003Cbr>\u003C/br>\nNote that `http_proxy` and `https_proxy` are always supported across the\nboard, while `HTTP_PROXY` is not always supported. Python (via `urllib`) complicates\nthe picture even more: `HTTP_PROXY` can be used [as long as\n`REQUEST_METHOD` is not defined in the environment](https://github.com/python/cpython/blob/030a713183084594659aefd77b76fe30178e23c8/Lib/urllib/request.py#L2504-L2508).\n\nWhile you might expect environment variables to be all-caps,\n`http_proxy` came first, so that's the de facto standard. When in doubt,\ngo with the lowercase form because that's universally supported.\n\nInstead of environment variables, Java uses [system properties](https://docs.oracle.com/javase/8/docs/technotes/guides/net/proxies.html). This avoids case issues entirely.\n\nUnlike most implementations, Go tries the uppercase version before\nfalling back to the lowercase version. We will see later why that caused\nissues for one GitLab customer.\n\n### `no_proxy` format\n\nSome users have [discussed the lack of the `no_proxy` specification in this issue](https://github.com/curl/curl/issues/1208). As\n`no_proxy` specifies an exclusion list, many questions arise about\nhow it behaves. For example, suppose your `no_proxy` configuration is defined:\n\n```sh\nexport no_proxy=example.com\n```\n\nDoes this mean that the domain must be an exact match, or will\n`subdomain.example.com` also match against this configuration? The\nfollowing table shows the state of various implementations. 
It turns out\nall implementations will match suffixes properly, as shown in the\n`Matches suffixes?` row:\n\n|                       | curl      | wget           | Ruby      | Python    | Go        |Java |\n|-----------------------|-----------|----------------|-----------|-----------|-----------|\n| `no_proxy`            | Yes       | Yes            | Yes       | Yes       | Yes       |No*|\n| `NO_PROXY`            | Yes       | No             | Yes       | Yes       | Yes       |No*|\n| Case precedence       | lowercase | lowercase only | lowercase | lowercase |Uppercase |N/A|\n| Matches suffixes?     | Yes       | Yes            | Yes       | Yes       | Yes       |No|\n| Strips leading `.`?   | Yes       | No             | Yes       | Yes       | No        |No|\n| `*` matches all hosts?| Yes       | No             | No        | Yes       | Yes       |Yes|\n| Supports regexes?     | No        | No             | No        | No        | No        |No|\n| Supports CIDR blocks? | No        | No             | Yes       | No        | Yes       |No|\n| Detects loopback IPs? | No        | No             | No        | No        | Yes       |No|\n| Resolves IP addresses? 
| No        | No             | Yes        | No        | Yes       |No|\n| Reference             | [source](https://github.com/curl/curl/blob/30e7641d7d2eb46c0b67c0c495a0ea7e52333ee2/lib/url.c#L2152-L2206) | [source](https://github.com/jay/wget/blob/099d8ee3da3a6eea5635581ae517035165f400a5/src/retr.c#L1266-L1274) | [source](https://github.com/ruby/ruby/blob/eead83160bcc5f49706e05669e5a7e2620b9b605/lib/uri/generic.rb#L1552-L1577) | [source](https://github.com/python/cpython/blob/030a713183084594659aefd77b76fe30178e23c8/Lib/urllib/request.py#L2519-L2551)| [source](https://github.com/golang/go/blob/master/src/vendor/golang.org/x/net/http/httpproxy/proxy.go#L170-L205) |[documentation](https://docs.oracle.com/javase/8/docs/technotes/guides/net/proxies.html)\n\n* Java uses the `http.nonProxyHosts` system property.\n\n### The impact of leading dots in no_proxy\n\nHowever, if there is a leading `.` in the `no_proxy` setting, the\nbehavior varies. For example, `curl` and `wget` behave\ndifferently. `curl` will always strip the leading `.` and match against\na domain suffix. This call bypasses the proxy:\n\n```sh\n$ env https_proxy=http://non.existent/ no_proxy=.gitlab.com curl https://gitlab.com\n\u003Chtml>\u003Cbody>You are being \u003Ca href=\"https://about.gitlab.com/\">redirected\u003C/a>.\u003C/body>\u003C/html>\n```\n\nHowever, `wget` does not strip the leading `.` and performs an exact\nstring match against a hostname. As a result, `wget` attempts to use a\nproxy if a top-level domain is used:\n\n```sh\n$ env https_proxy=http://non.existent/ no_proxy=.gitlab.com wget https://gitlab.com\nResolving non.existent (non.existent)... failed: Name or service not known.\nwget: unable to resolve host address 'non.existent'\n```\n\nIn all implementations, regular expressions are never supported. I\nsuspect using regexes complicates matters further, because regexes have\ntheir own flavors (e.g. PCRE, POSIX, etc.). 
Using regexes also\nintroduces potential performance and security issues.\n\nIn some cases, setting `no_proxy` to `*` effectively disables proxies\naltogether, but this is not a universal rule.\n\nOnly Ruby performs a DNS lookup to resolve a hostname to an IP address when deciding if a proxy should be used. Be careful if you use IP addresses with Ruby because it’s possible a hostname may resolve to an excluded IP address. In general, do not specify IP addresses in no_proxy variable unless you expect that the IPs are explicitly used by the client.\n\nThe same holds true for CIDR blocks, such as `18.240.0.1/24`. CIDR\nblocks only work when the request is directly made to an IP\naddress. Only Go and Ruby allow CIDR blocks. Unlike other\nimplementations, Go even automatically disables the use of a proxy if it\ndetects a loopback IP addresses.\n\n## Why does this matter?\nDiscrepancies in proxy environment variable handling, particularly between Ruby and Go, can lead to a real-world issues where Git pushes worked via the command line but failed in the web UI for a GitLab customer. Understanding these inconsistencies is crucial for troubleshooting and configuring applications that operate across multiple languages within corporate networks utilizing proxy servers.\n\n### Challenges of defining proxy variables in multi-language applications\n\nIf you have an application written in multiple languages that needs to\nwork behind a corporate firewall with a proxy server, you may need to\npay attention to these differences. For example, GitLab is composed of a\nfew services written in Ruby and Go. One customer set its proxy\nconfiguration to something like the following:\n\n```yaml\nHTTP_PROXY: http://proxy.company.com\nHTTPS_PROXY: http://proxy.company.com\nNO_PROXY: .correct-company.com\n```\n\nThe customer reported the following issue with GitLab:\n\n1. A `git push` from the command line worked\n1. 
Git changes made via the Web UI failed\n\nOur support engineers discovered that due to a Kubernetes configuration\nissue, a few stale values lingered. The pod actually had an environment\nthat looked something like:\n\n```yaml\nHTTP_PROXY: http://proxy.company.com\nHTTPS_PROXY: http://proxy.company.com\nNO_PROXY: .correct-company.com\nno_proxy: .wrong-company.com\n```\n### How inconsistent proxy settings can cause failures\n\nThe inconsistent definitions in `no_proxy` and `NO_PROXY` set off red\nflags, and we could have resolved the issue by making them consistent or\nremoving the incorrect entry. But let's drill into what happened.\nRemember from above that:\n\n1. Ruby tries the lowercase form first\n1. Go tries the uppercase form first\n\nAs a result, services written in Go, such as GitLab Workhorse, had the\ncorrect proxy configuration. A `git push` from the command line worked\nfine because the Go services primarily handled this activity:\n\n```mermaid\nsequenceDiagram\n    participant C as Client\n    participant W as Workhorse\n    participant G as Gitaly\n    C->>W: 1. git push\n    W->>G: 2. gRPC: PostReceivePack\n    G->>W: 3. OK\n    W->>C: 4. OK\n```\n\nThe gRPC call in step 2 never attempted to use the proxy because\n`no_proxy` was configured properly to connect directly to Gitaly.\n\nHowever, when a user makes a change in the UI, Gitaly forwards the\nrequest to a `gitaly-ruby` service, which is written in\nRuby. `gitaly-ruby` makes changes to the repository and [reports back\nvia a gRPC call back to its parent process](https://gitlab.com/gitlab-org/gitaly/-/issues/3189).  However,\nas seen in step 4 below, the reporting step didn't happen:\n\n```mermaid\nsequenceDiagram\n    participant C as Client\n    participant R as Rails\n    participant G as Gitaly\n    participant GR as gitaly-ruby\n    participant P as Proxy\n    C->>R: 1. Change file in UI\n    R->>G: 2. gRPC: UserCommitFiles\n    G->>GR: 3. gRPC: UserCommitFiles\n    GR->>P: 4. 
CONNECT\n    P->>GR: 5. FAIL\n```\n\nBecause gRPC uses HTTP/2 as the underlying transport, `gitaly-ruby`\nattempted a CONNECT to the proxy since it was configured with the wrong\n`no_proxy` setting. The proxy immediately rejected this HTTP request,\ncausing the failure in the Web UI push case.\n\n### Correcting proxy configuration issues\n\nOnce we eliminated the lowercase `no_proxy` from the environment, pushes\nfrom the UI worked as expected, and `gitaly-ruby` connected directly to\nthe parent Gitaly process. Step 4 worked properly in the diagram below:\n\n```mermaid\nsequenceDiagram\n    participant C as Client\n    participant R as Rails\n    participant G as Gitaly\n    participant GR as gitaly-ruby\n    participant P as Proxy\n    C->>R: 1. Change file in UI\n    R->>G: 2. gRPC: UserCommitFiles\n    G->>GR: 3. gRPC: UserCommitFiles\n    GR->>G: 4. OK\n    G->>R: 5. OK\n    R->>C: 6. OK\n```\n\n## A surprising discovery with gRPC\n\nWe also discovered that gRPC does not [support HTTPS proxies](https://github.com/grpc/grpc/issues/20939). This again subtly affects the behavior of the system depending on how `HTTPS_PROXY` is set.\n\n### gRPC behavior with `HTTPS_proxy`\n\nNote that the customer set `HTTPS_PROXY` to an unencrypted HTTP proxy;\nnotice that `http://` is used instead of `https://`. While this isn't\nideal from a security standpoint, some people do this to avoid the\nhassle of clients failing due to TLS certificate verification issues.\n\nIronically, if an HTTPS proxy were specified, we would not have seen\nthis problem. If an HTTPS proxy is used, gRPC will ignore this setting\nsince HTTPS proxies are not supported.\n\n### The lowest common denominator\n\nI think we can all agree that one should never define inconsistent\nvalues with lowercase and uppercase proxy settings. However, if you ever\nhave to manage a stack written in multiple languages, you might need to\nconsider setting HTTP proxy configurations to the lowest common\ndenominator.\n\n1. 
`http_proxy` and `https_proxy`\n\n* Use lowercase form. `HTTP_PROXY` is not always supported or recommended.\n    * If you _absolutely must_ use the uppercase form as well, be **sure** they share the same value.\n\n2. `no_proxy`\n\n1. Use lowercase form.\n1. Use comma-separated `hostname:port` values.\n1. IP addresses are okay, but hostnames are never resolved.\n1. Suffixes are always matched (e.g. `example.com` will match `test.example.com`).\n1. If top-level domains need to be matched, avoid using a leading dot (`.`).\n1. Avoid using CIDR matching since only Go and Ruby support that.\n\n## Steps toward standardizing `no_proxy`\n\nKnowing the least common denominator can help avoid issues if these\ndefinitions are copied for different Web clients. But should `no_proxy`\nand the other proxy settings have a documented standard rather than an\nad hoc convention? The list below may serve as a starting point for a\nproposal:\n\n1. Prefer lowercase forms over uppercase variables (e.g. `http_proxy` should be searched before `HTTP_PROXY`).\n1. Use comma-separated `hostname:port` values.\n    * Each value may include optional whitespace.\n1. Never perform DNS lookups or use regular expressions.\n1. Use `*` to match **all** hosts.\n1. Strip leading dots (`.`) and match against domain suffixes.\n1. Support CIDR block matching.\n1. Never make assumptions about special IP addresses (e.g. loopback addresses in `no_proxy`).\n\n## Key takeaways on proxy standardization\n\nIt's been over 25 years since the first Web proxy was released. While\nthe basic mechanics of configuring a Web client via environment\nvariables have not changed much, a number of subtleties have emerged\nacross different implementations. We saw for one customer, erroneously\ndefining conflicting `no_proxy` and `NO_PROXY` variables led to hours of\ntroubleshooting due to the differences with which Ruby and Go parse\nthese settings. 
We hope highlighting these differences will avoid future\nissues in your production stack, and we hope that Web client maintainers\nwill standardize the behavior to avoid such issues in the first place.\n",[269,1372,9,1225],{"slug":1812,"featured":6,"template":700},"we-need-to-talk-no-proxy","content:en-us:blog:we-need-to-talk-no-proxy.yml","We Need To Talk No Proxy","en-us/blog/we-need-to-talk-no-proxy.yml","en-us/blog/we-need-to-talk-no-proxy",{"_path":1818,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1819,"content":1825,"config":1830,"_id":1832,"_type":14,"title":1833,"_source":16,"_file":1834,"_stem":1835,"_extension":19},"/en-us/blog/welcome-kde",{"title":1820,"description":1821,"ogTitle":1820,"ogDescription":1821,"noIndex":6,"ogImage":1822,"ogUrl":1823,"ogSiteName":686,"ogType":687,"canonicalUrls":1823,"schema":1824},"Why the KDE community is #movingtogitlab","Open source software community giant KDE finished phase one of their migration to GitLab and has joined our GitLab open source program. Check out what's next for KDE and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681382/Blog/Hero%20Images/migratingbirds.jpg","https://about.gitlab.com/blog/welcome-kde","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why the KDE community is #movingtogitlab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nuritzi Sanchez\"}],\n        \"datePublished\": \"2020-06-29\",\n      }",{"title":1820,"description":1821,"authors":1826,"heroImage":1822,"date":1827,"body":1828,"category":694,"tags":1829},[1347],"2020-06-29","\n\nThe [KDE community](https://kde.org/) is [#movingtogitlab](https://twitter.com/hashtag/movingtogitlab)! After announcing the original decision to migrate to GitLab in November 2019, KDE has officially completed phase one of their migration, and contributors have begun to use GitLab on a daily basis at invent.kde.org. 
Read on to learn more about KDE's migration story.\n\n## About KDE\n\nKDE is an international community that creates open source software for desktops and mobile devices. KDE software is compatible with multiple platforms, including GNU/Linux, FreeBSD, Windows, macOS, and Android. Their products are used by millions of home and office workers and are being deployed in schools around the world.\n\nWith more than 2,700 artists, designers, programmers, translators, writers, and other contributors from across the globe, the KDE community is thriving.\n\nTogether, this community creates and maintains more than 200 applications and countless add-ons, plugins, and Plasmoids, 1000+ repositories, 80+ frameworks for Qt developers, and more than 2,600 projects. KDE software is translated into more than 100 languages to enable vast global reach.\n\n## Why KDE moved to GitLab\n\nOne of the main reasons that KDE decided to move to GitLab is to improve the newcomers story and make it easier to start contributing to KDE software. As [Aleix Pol](https://ev.kde.org/corporate/board/), President of KDE e.V says, \"Adopting GitLab has been a natural next step for us. Simplifying the onboarding experience for new contributors is one of our main goals in the KDE community. 
Being able to allow project contributors to easily participate in how the products they maintain are tested and delivered will certainly be a turning point for our ecosystem.\"\n\n\"By using a platform offering an interface and workflow that most open source developers are nowadays familiar with, we are confident that we are lowering the bar for new contributors to join us, and are providing the foundation for our community to scale in the following years,\" added [Neofytos Kolokotronis](https://ev.kde.org/corporate/board/), member of KDE e.V.'s Board of Directors and a core member of KDE's Onboarding team.\n\nAnother important consideration for the KDE community was to move to a product that was well-supported and where feedback from the community would be taken into account. With a release every month, GitLab has fast-paced development and is actively maintained by the company and community alike. Community members help to shape the way the product is built, and there's an [open roadmap](/direction/) since [transparency is one of GitLab's core values](https://handbook.gitlab.com/handbook/values/#transparency).\n\nMoving to new tools is a lot of work for established communities like KDE. Migration decisions require careful communication and the complex task of gathering community consensus.\n\nThe KDE team made the decision to migrate away from its [former tech stack](https://gitlab.com/gitlab-org/gitlab/-/issues/24900#gitlab-replacements) after following a series of carefully designed steps. First, they talked to the sysadmin team and then formed a migration team to evaluate the move. Next, the sysadmin team completed a thorough study of GitLab's features and did an intake and comparison of the community's needs against those product features. 
Then, they created a process that allows KDE to run short test cycles with some projects, document the process, and provide feedback to the community.\n\nThe migration started by moving some smaller and more agile KDE teams that were very interested in testing and providing feedback. After this cycle was completed successfully, KDE started migrating teams with a larger codebase and more contributors. Once all of the major issues were resolved, they made the final switch for all remaining projects they planned to move. The sysadmin team documented the results after each step and shared them directly with the KDE community to receive feedback and gather consensus on how to proceed.\n\nAs the switch to GitLab fell directly under the scope of KDE's [\"Streamlined Onboarding of New Contributors\" goal](https://community.kde.org/Goals/Streamlined_onboarding_of_new_contributors), the KDE Onboarding team was also involved from the start, working very closely with the sysadmin team, who were leading the effort. The community was involved in the decision-making from the beginning, and stayed up-to-date on each phase of the migration, and all questions and concerns were answered and addressed along the way.\n\n\"This was a major change for us, but we are very satisfied with how our community collaborated over long discussion threads. We believe that by working together we made the best decisions as we moved forward,\" says Neofytos.\n\n## Migration challenges and solutions\n\nThe biggest challenge for KDE was the sheer volume of data they were dealing with and how it was integrated into the numerous tools in use (including [Phabricator](https://www.phacility.com/phabricator/)). With more than 1,000 repositories, this migration was a big undertaking.\n\nTo address this challenge, KDE decided to approach the migration in phases rather than do it all at once. 
By phasing the migration, they were able to deal with different data types, such as repositories and tasks, separately.\n\nKDE developed custom tools to make bulk updates easier throughout the migration process. These tools help set the name, description, and avatar of the projects alongside a number of settings, for example, protected branches, and merge methods. By using these custom tools for bulk updates, KDE was also able to avoid granting maintainer access to individual contributors. KDE only allows maintainer access for sysadmins per their access and permissions policy.\n\nKDE ported custom Git hooks to ensure that certain checks and actions continued after the move to Gitlab. These include checks to ensure file encodings match KDE requirements and that bugs on their Bugzilla installation were closed as needed.\n\nIn order to support their translation community, which still uses Subversion in their workflow, KDE also built tooling to export SSH keys from GitLab to avoid the need to update these in two places.\n\nKDE also adjusted the tools used to build and develop KDE software to make them compatible with the new repository structure in GitLab.\n\nAt this point, KDE overcame most of their migration hurdles. Once the preparation work was finished to clean up a number of systems to work more natively with GitLab, the actual migration took about one day.\n\nBut there are a few more challenges left before KDE can transition continuous integration (CI) and task management over to GitLab. 
To follow along with the KDE migration, you can take a look at the [list of issues that KDE is tracking](https://gitlab.com/gitlab-org/gitlab/-/issues/26581).\n\n## Architectural decisions\n\nA common challenge for organizations moving to GitLab is deciding how to structure their groups to best enable their community's workflows and allow them to abide by their policies.\n\nKDE decided to tackle this challenge by setting up a series of groups at the top level of GitLab to act as categories. KDE's 1,200 repositories were then sorted into each of these categories.\n\nKDE formed this architectural strategy to help make projects more discoverable. KDE wanted to avoid the impracticality of people needing to scroll endlessly through repositories. Setting up top-level categories also allows developers to get an easier overview of merge requests for the categories they are most interested in.\n\nWith regards to permissions, KDE uses a single master \"KDE Developers\" group to manage membership and permission levels. Everyone there is given \"Developer\" access. This group is then invited to all of the groups containing repositories except for the ones containing the KDE website and infrastructure repos. This method of dealing with permissions allows KDE to maintain a single source of truth.\n\n## GitLab + KDE = ❤️\n\nKDE is using the [Community Edition](/install/ce-or-ee/) of GitLab because of their commitment to open source. They are a member of our [GitLab for Open Source](/solutions/open-source/) program, and have been actively collaborating with GitLab team members throughout the migration. 
One of the benefits of using the GitLab for Open Source program for large migration efforts is that the community often offers some extra assistance through the evaluation period and beyond.\n\nFor example, the GitLab for Open Source program has a [public tracker for KDE's migration](https://gitlab.com/gitlab-org/gitlab/-/issues/24900), which is used to communicate and better understand at a glance the issues that are especially important. This allows KDE, GitLab, and the larger open source community to collaborate on challenges together.\n\n\"GitLab's values of [collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) and [transparency](https://handbook.gitlab.com/handbook/values/#transparency) really shine through,\" says Neofytos. \"We appreciate their openness to accepting merge requests from community members and considering proposals for new features. We have had a great experience so far collaborating with members of the GitLab community and members of the GitLab team – from developers to program managers to product owners alike.\"\n\nNow that phase one of the KDE migration is complete, we look forward to continuing to collaborate with KDE through the remaining phases of the migration.\n\n### Summary of the KDE migration\n\n * Phase 1: Code hosting & review ✅\n * Phase 2: CI\n * Phase 3: Task management for developers\n\n## How to contribute to KDE\n\nKDE has an amazing community and they welcome new members! Existing members are happy to provide feedback on newcomers' contributions with the goal of helping them learn. Every day more and more people join the ever-growing KDE family – and there's always room for more!\n\nKDE has a rich infrastructure of web resources, forums, mailing-lists, IRC (chat), and many other ways to get in touch. 
To learn more about joining the KDE community, visit their \"[Get Involved](https://community.kde.org/Get_Involved)\" page, which offers guidance to all contributors from all backgrounds.\n\n",[697,9,830],{"slug":1831,"featured":6,"template":700},"welcome-kde","content:en-us:blog:welcome-kde.yml","Welcome Kde","en-us/blog/welcome-kde.yml","en-us/blog/welcome-kde",{"_path":1837,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1838,"content":1844,"config":1850,"_id":1852,"_type":14,"title":1853,"_source":16,"_file":1854,"_stem":1855,"_extension":19},"/en-us/blog/what-south-africa-taught-me-about-cybersecurity",{"title":1839,"description":1840,"ogTitle":1839,"ogDescription":1840,"noIndex":6,"ogImage":1841,"ogUrl":1842,"ogSiteName":686,"ogType":687,"canonicalUrls":1842,"schema":1843},"What our summit in South Africa taught me about cybersecurity","Cybersecurity is a necessity, but it's often treated as an afterthought. What it has in common with modern photography could tell us how to make it less painful to achieve.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671140/Blog/Hero%20Images/south-africa-cyber-security.jpg","https://about.gitlab.com/blog/what-south-africa-taught-me-about-cybersecurity","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What our summit in South Africa taught me about cybersecurity\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"}],\n        \"datePublished\": \"2018-09-11\",\n      }",{"title":1839,"description":1840,"authors":1845,"heroImage":1841,"date":1847,"body":1848,"category":804,"tags":1849},[1846],"Cindy Blake","2018-09-11","\nThe GitLab team [summit](/events/gitlab-contribute/) recently took place in Cape Town, South Africa, which, as you can imagine, promised to be memorable.\nWhen preparing to cross three continents over 22 hours on airplanes, you think carefully about what to pack. 
You are anticipating the most beautiful scenery ever and want to make sure you capture it in pictures. So you find your camera – the one you haven't used in a long time because you've grown accustomed to using your cellphone. After careful debate, you decide to take it because those awesome experiences and scenery deserve the best camera.\n\nThe camera requires certain things: You have to make sure it's charged or has batteries; it needs to have adequate storage; you may need additional lenses which also require special care. Everything must be protected and carefully packed, and requires additional space and weight to carry on board with you because it's too precious to put in checked baggage.\n\nWhen you get to South Africa and you see this incredible scenery, you take out your wonderful camera and you realize a few things:\n\n## 1. You have only a precious few moments to capture the image\n\nDo I really have time to fidget with f-stops and customizations, or do I just want to capture the picture and perhaps customize it a bit later by cropping and adjusting the light?\n\n## 2. It's difficult to share camera photos immediately\n\nI'm anxious to share these images with friends and family back home. It occurs to me that with my phone I can share images immediately and effortlessly by email, text, Slack, or a variety of social media. If I take the pictures on my expensive camera, I can't share them immediately because it's not connected to anything. I'll have to wait until I get back to my hotel room so I can take the flash drive and put it in my laptop, log on to the Wi-Fi, and then share my images.\n\n## 3. My camera photos aren't secure\n\nIf I lose that flash drive, all of my images are gone (unless I back them up immediately after capturing them – not likely!). While it is possible I could lose my cell phone and lose my pictures, it's less likely. My phone is an integral part of my daily workflow – an appendage even. 
How often do we feel naked if we forget our phone at home or even in the other room? I'm much less likely to leave my phone on the bus when I get off to explore than to leave my camera behind.\n\nSo, I choose to use my phone to capture these magnificent images. My primary objective is not taking fabulous pictures worthy of publication that I can sell or frame on my wall, but to take pictures that are good enough, that capture the special place, and that I can share with friends and family easily and effortlessly. If it's too hard to share, I may not do it, or it may take me a long time. In addition, I don't have to think ahead about how my phone will capture an image in such a way to send it to friends; the images automatically integrate with all of the other sharing mechanisms on my phone. It simply works. I am free to focus on my primary effort of capturing the images while I soak up the moments.\n\n## Now, how does this relate to cybersecurity?\n\nCompanies invest a great deal in [application security to test their software](/topics/devsecops/) for security vulnerabilities. It's a separate application that requires its own budget and maintenance. Like the specialized camera, the information it creates must be shared in order to be most useful. The security team can use it by itself, but to be truly effective, the vulnerabilities found must be shared with development so that they can be corrected. Yet developers have little interest in logging onto a security system to access the data. Would your friends and family want to physically turn on your camera to look at your pictures? Maybe, but it's very limiting as to whom you can reach.\n\nThe challenge then is how do you get the data found by the application security system into the hands of the developers? 
Today that is one of the greatest challenges to overcome, even in rare cases where the objectives of security and dev totally align.\n\n### What if you looked at application security the same way we look at photographing images?\n\nIs the prime objective to do the most eloquent job of finding the vulnerabilities? Or, is the prime objective to get the vulnerabilities that we do find fixed? If it is the latter then the primary issue must be integrating with the developers’ workflow.\n\nWith [GitLab application security testing](/solutions/security-compliance/), it is like the camera on your phone – maybe not superior to a dedicated tool in isolation, but good quality, and more importantly, integrated into the workflow to be the most useful. It is easily and efficiently used without added thought. With GitLab, every commit and every merge request is tested. There isn't even a separate step – it's all automated for you without additional effort.\n\n![GitLab security dashboard](https://about.gitlab.com/images/blogimages/security-dashboard.png){: .shadow.medium.center}\n\nAs with photography, the most important thing is that you capture those moments before they escape you; with application security testing, it's important that you capture those vulnerabilities so that you can act upon them. With GitLab, the vulnerabilities are shown right there in the developers' workflow. They don't have to log into a different system nor interrupt their work. The security vulnerabilities are shown right alongside any other application flaws in the pipeline results of each merge request. The developer can choose to fix them now or continue the build, but either way, the vulnerabilities are captured and logged. And now with the security dashboard, the security team can evaluate further and create an issue for remediation if needed.\n\n>The vulnerabilities are shown right there in the developers' workflow. 
They don't have to log into a different system nor interrupt their work\n\nThis really does turn application security on its head! It puts the insight and tools for action into the hands of the developer and then shares results with security, rather than the other way around. It makes so much more sense because the developer must do the remediation, not the security pro. Imagine the efficiency gains if most of the effort was placed on eliminating the vulnerabilities up front, rather than on finding and tracking them later in the SDLC! Sound familiar? This has been imagined before and cost savings even estimated. It's the \"shift left\" mantra. While everyone embraces it, few actually achieve it. Why? Because they lack the tools to enable such a seismic shift where the only gate is the merge request.\n\nAlbert Einstein said that the definition of insanity was doing the same thing and expecting a different result. So how can we expect traditional application security methods to meet the needs of modern, cloud-first DevOps environments? We can't. With GitLab, our single application helps users efficiently develop and deploy secure code by leveraging the power of integration across the entire SDLC. [No more stitching together complex DevOps tool chains](/). Microsoft did something similar years ago. Remember Word Perfect? It succumbed to Word because content could be copied/pasted and integrated across the Microsoft suite of documents, spreadsheets and slides. GitLab is on track to do the same thing for software development – including application security testing.\n\n_What do you think? 
Is this a new era of app security?_\n\nPhoto by [Clyde Thoma](https://unsplash.com/photos/8plz1xK_Wmk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyTexts) on [Unsplash](https://unsplash.com/search/photos/cape-town?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[720,830,9],{"slug":1851,"featured":6,"template":700},"what-south-africa-taught-me-about-cybersecurity","content:en-us:blog:what-south-africa-taught-me-about-cybersecurity.yml","What South Africa Taught Me About Cybersecurity","en-us/blog/what-south-africa-taught-me-about-cybersecurity.yml","en-us/blog/what-south-africa-taught-me-about-cybersecurity",{"_path":1857,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1858,"content":1864,"config":1870,"_id":1872,"_type":14,"title":1873,"_source":16,"_file":1874,"_stem":1875,"_extension":19},"/en-us/blog/working-with-yaml-gitlab-ci-android",{"title":1859,"description":1860,"ogTitle":1859,"ogDescription":1860,"noIndex":6,"ogImage":1861,"ogUrl":1862,"ogSiteName":686,"ogType":687,"canonicalUrls":1862,"schema":1863},"Working with YAML in GitLab CI from the Android perspective","Guest author Renato Stanic shares a sample YAML configuration for Android projects, which helps his team with faster, more iterative development.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665524/Blog/Hero%20Images/yaml-gitlab-ci-android.png","https://about.gitlab.com/blog/working-with-yaml-gitlab-ci-android","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Working with YAML in GitLab CI from the Android perspective\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Renato Stanic\"}],\n        \"datePublished\": \"2017-11-20\",\n      }",{"title":1859,"description":1860,"authors":1865,"heroImage":1861,"date":1867,"body":1868,"category":741,"tags":1869},[1866],"Renato Stanic","2017-11-20","Using [continuous integration in our 
everyday\nworkflow](/solutions/continuous-integration/) can help us a lot with faster\nand iterative development, and having CI do checks every time we change our\ncodebase helps us deal with the fear of modifying code.\n\n\n\u003C!-- more -->\n\n\nDeploying app builds manually takes time and leaves us idle while we could\nbe developing new and exciting features instead. Here at Undabot we are\nusing GitLab CI for continuous integration. GitLab CI uses a YAML file for\njob configuration. In this blog post we will go through a sample YAML\nconfiguration for Android projects and describe the main YAML building\nblocks with common Android CI jobs.\n\n\n### YAML intro\n\nThe YAML file defines a set of jobs with constraints stating when they\nshould be run. The jobs are defined as top-level elements with a name and\nalways have to contain at least the `script` clause:\n\n\n```\n\nhelloworld_job:\n  script: \"echo Hello World!\"\n\nassemble_job:\n  script: \"./gradlew assembleRelease\"\n```\n\n\nYAML syntax allows for more complex job definitions than in the above\nexample:\n\n\n```\n\nbefore_script:\n  - bundle install\n\nafter_script:\n  - rm secrets\n\nstages:\n  - build\n  - test\n  - deploy\n\nhelloworld_job:\n  stage: build\n  script:\n    - echo Hello World\n  only:\n    - master\n  tags:\n    - android\n```\n\n\n`before_script` – commands that run before each job's script\n\n`after_script` – commands that run after each job's script\n\n`stages` – used to define build stages\n\n`only` – defines the names of branches and tags for which the job will run\n\n`tags` – used to select specific Runners from the list of all Runners that\nare allowed to run this project.\n\n\n## Initial setup for Android\n\n\nFirst step is to create a YAML file called `gitlab-ci.yml` in root directory\nof your Android project and add the following code:\n\n\n```\n\nbefore_script:\n  - export ANDROID_HOME=\"$HOME/Library/Android/sdk\"\n  - bundle install\nstages:\n  - build\n  - test\n  - 
quality_assurance\n  - deploy\n```\n\n\nIn `before_script` we execute these two commands:\n\n`- export ANDROID_HOME=\"$HOME/Library/Android/sdk\"`– sets Android home\nenvironment variable to be available for all other jobs and Gradle tasks\n\n`- bundle install` – we are using fastlane for task automation and Bundler\nto manage Ruby gems so we need to run bundle install to make sure everything\nis installed correctly.\n\n\nIn the `stages` section we define four build stages:\n\n`- build`– for build jobs\n\n`- test`– for test jobs that include unit and instrumentation tests\n\n`- quality_assurance`– for jobs that run all of our QA tools\n\n`- deploy`– for deployment jobs\n\n\n## Build stage\n\n\nThis job (`build_job`) is used to create an APK artifact that can be used to\ntest the app manually or to upload it to the Play Store.\n\n\n```\n\nbuild_job:\n  stage: build\n  script:\n    - ./gradlew clean assembleRelease\n  artifacts:\n    paths:\n      - app/build/outputs/\n  ```\n\n`build_job:`– name of the CI job\n\n`stage: build`– it gets executed in the build stage\n\n`./gradlew clean assembleRelease`– executes Gradle command to create a\nrelease APK\n\n`artifacts:`– job section that defines list of files and directories that\nare attached to a job after completion.\n\n`paths:`– output file paths\n\n`app/build/outputs`– directory path of our APK\n\n\n## Unit tests\n\n\nThis job (`unit_tests`) runs our unit tests in a test stage. Every time they\nfail, a report artifact will be created. 
Each report artifact expires within\nfour days of creation.\n\n\n```\n\nunit_tests:\n  stage: test\n  script:\n    - ./gradlew test\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/tests/\n  ```\n\n`./gradlew test`– run Gradle command that triggers our unit tests\nartifacts:\n\n`name:`– defines artifact name by using environment variables\n\n`CI_PROJECT_NAME`– project name that is currently being built\n\n`CI_BUILD_REF_NAME`– branch or tag name for which project is built\n\n`when:`– defines when is it created (on_success, on_failure, always)\n\n`expire_in:`– defines when is it expired, after artifact has expired it gets\ndeleted from CI\n\n\n## Instrumentation tests\n\n\nThis job (`instrumentation_tests`) runs all of our instrumentation tests in\na test stage by starting a windowless emulator without sound and animations\nfollowed by a [custom bash\nscript](https://gist.github.com/anonymous/614aafb2d8710865c688684a8657a141)\nthat waits for the emulator to start, after which the device is unlocked by\nsending key event 82. When the emulator is ready we run the Gradle command\nfor instrumentation tests. 
Once all tests finished running, the emulator is\nkilled with a [custom bash\nscript](https://gist.github.com/anonymous/614aafb2d8710865c688684a8657a141).\n\n\n```\n\ninstrumentation_tests:\n  stage: test\n  script:\n    - emulator -avd testAVD -no-audio -no-window &\n    - ./ci/android-wait-for-emulator.sh\n    - adb devices\n    - adb shell settings put global window_animation_scale 0 &\n    - adb shell settings put global transition_animation_scale 0 &\n    - adb shell settings put global animator_duration_scale 0 &\n    - adb shell input keyevent 82 &\n    - ./gradlew connectedAndroidTest\n    - ./ci/stop-emulators.sh\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/androidTests/connected/\n  ```\n\n`- emulator -avd testAVD -no-audio -no-window &`\n\n`- ./ci/android-wait-for-emulator.sh`\n\nStarts the emulator and waits for it to boot.\n\n`- adb devices`\n\nDisplays list of found devices in GitLab web terminal.\n\n`- adb shell settings put global window_animation_scale 0 &`\n\n`- adb shell settings put global transition_animation_scale 0 &`\n\n`- adb shell settings put global animator_duration_scale 0 &`\n\nDisables all animations and transitions.\n\n\n## Static analysis\n\n\nThis job (`static_analysis`) runs all of static code analysis in QA stage.\nThis is a tricky area especially if you are working on a project with a lot\nof legacy code. My suggestion would be to disable all of the rules and start\nfixing them one at a time. 
Tools used for static analysis are lint,\ncheckstyle, pmd and findbugs.\n\n\n```\n\nstatic_analysis:\n  stage: quality_assurance\n  script:\n    - ./gradlew lint\n    - ./gradlew checkstyle\n    - ./gradlew pmd\n    - ./gradlew findbugs\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/\n```\n\n\n`- ./gradlew lint`\n\n`- ./gradlew checkstyle`\n\n`- ./gradlew pmd`\n\n`- ./gradlew findbugs`\n\nGradle commands that trigger QA tools.\n\n`- app/build/reports` – path to our QA reports\n\n\n## Deploy stage\n\n\nThe final job (`deploy_internal`) deploys the app to the QA team in deploy\nstage. You don’t want to deploy every time you commit something so this step\nis set as manual. Manual jobs are triggered via GitLab web interface by\npressing the play button in your pipeline list. If you are using fastlane as\nyour deployment tool, the last job will look like the following code:\n\n\n```\n\ndeploy_internal:\n  stage: deploy\n  script:\n    - bundle exec fastlane android deploy_lane\n  when: manual\n```\n\n\n`- bundle exec fastlane android deploy_lane`– executes fastlane deploy lane\nthat deploys app to the QA team\n\n`when: manual` – defines [when is a job\nexecuted](https://docs.gitlab.com/ee/ci/yaml/#when)\n\n\n## There’s plenty more\n\n\nSetting up Android continuous integration with GitLab CI is great and\nsupports plenty of cool features a lot more than we showed. 
Hopefully this\nshort introduction was helpful and is going to motivate you to discover more\nfeatures on your own.\n\n\nComplete `gitlab-ci.yml`:\n\n\n```\n\nbefore_script:\n  - export ANDROID_HOME=\"$HOME/Library/Android/sdk\"\n  - bundle install\n\nstages:\n\n- build\n\n- test\n\n- quality_assurance\n\n- deploy\n\n\nbuild_job:\n  stage: build\n  script:\n    - ./gradlew clean assembleRelease\n  artifacts:\n    paths:\n    - app/build/outputs/\n\nunit_tests:\n  stage: test\n  script:\n    - ./gradlew test\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/tests/\n\ninstrumentation_tests:\n  stage: test\n  script:\n    - emulator -avd testAVD -no-audio -no-window &\n    - ./ci/android-wait-for-emulator.sh\n    - adb devices\n    - adb shell settings put global window_animation_scale 0 &\n    - adb shell settings put global transition_animation_scale 0 &\n    - adb shell settings put global animator_duration_scale 0 &\n    - adb shell input keyevent 82 &\n    - ./gradlew connectedAndroidTest\n    - ./ci/stop-emulators.sh\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/androidTests/connected/\n\nstatic_analysis:\n  stage: quality_assurance\n  script:\n    - ./gradlew lint\n    - ./gradlew checkstyle\n    - ./gradlew pmd\n    - ./gradlew findbugs\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/\n\ndeploy_internal:\n  stage: deploy\n  script:\n    - bundle exec fastlane android deploy_lane\n  when: manual\n```\n\n\n_[Working with YAML in GitLab CI from an Android\nperspective](https://blog.undabot.com/working-with-yaml-in-gitlab-ci-from-android-perspective-b8cf54b5b911)\nwas originally published on Undabot's 
blog._\n",[109,9],{"slug":1871,"featured":6,"template":700},"working-with-yaml-gitlab-ci-android","content:en-us:blog:working-with-yaml-gitlab-ci-android.yml","Working With Yaml Gitlab Ci Android","en-us/blog/working-with-yaml-gitlab-ci-android.yml","en-us/blog/working-with-yaml-gitlab-ci-android",{"_path":1877,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1878,"content":1884,"config":1889,"_id":1891,"_type":14,"title":1892,"_source":16,"_file":1893,"_stem":1894,"_extension":19},"/en-us/blog/wrapping-up-commit",{"title":1879,"description":1880,"ogTitle":1879,"ogDescription":1880,"noIndex":6,"ogImage":1881,"ogUrl":1882,"ogSiteName":686,"ogType":687,"canonicalUrls":1882,"schema":1883},"Wrapping up GitLab Commit","From bagels to bowling with a healthy dose of DevSecOps and CI/CD in between, it was an epic day of learning and sharing at GitLab Commit Brooklyn.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680823/Blog/Hero%20Images/commit-brooklyn-graffiti-cover.jpg","https://about.gitlab.com/blog/wrapping-up-commit","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Wrapping up GitLab Commit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-09-18\",\n      }",{"title":1879,"description":1880,"authors":1885,"heroImage":1881,"date":1886,"body":1887,"category":301,"tags":1888},[935],"2019-09-18","\n\n***Relive GitLab Commit Brooklyn through the power of lights, cameras, and a pinch of Tanuki magic. 
Here's the [full YouTube playlist for the event](https://www.youtube.com/playlist?list=PLFGfElNsQthaaqEAb6ceZvYnZgzSM50Kg)!***\n\nIf there's anything you need to understand about GitLab's first ever user conference, it's this: I started the day with a New York bagel, learned how to create a CI/CD pipeline in just 20 minutes, found out [NASA will take GitLab into space](/blog/open-source-nasa-gl/), and it ended in a bowling alley... yes, it was _that_ kind of day.\n\nWe did a neighborhood takeover of a few blocks in the Williamsburg area of Brooklyn and before I even arrived at the venue, I knew something interesting was happening. There was wall grafitti and street graffiti.\n\n![street graffiti](https://about.gitlab.com/images/blogimages/commitbrooklynstreet.jpg){: .shadow.small.center}\nGitLab has arrived in Brooklyn!\n{: .note.text-center}\n\nOver 400 attendees gathered in brick-and-light-filled meeting spaces for conversation, demonstrations, laughter, and even a screaming chicken (the result of the CI/CD demo). It was an epic day of sharing, learning and exploring that could have felt overwhelming. Instead, the quirky informal spaces seemed to relax everyone and make it easier to actually listen and learn.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-partner=\"tweetdeck\">\u003Cp lang=\"en\" dir=\"ltr\">THE live coding keynote is here! 
\u003Ca href=\"https://twitter.com/eddiezane?ref_src=twsrc%5Etfw\">@eddiezane\u003C/a> of \u003Ca href=\"https://twitter.com/digitalocean?ref_src=twsrc%5Etfw\">@digitalocean\u003C/a> introduces his “startup” Screaming Chicken at \u003Ca href=\"https://twitter.com/hashtag/GitLabCommit?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabCommit\u003C/a> and shows how he runs it on \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@GitLab\u003C/a> AutoDevops, \u003Ca href=\"https://twitter.com/hashtag/Kubernetes?src=hash&amp;ref_src=twsrc%5Etfw\">#Kubernetes\u003C/a> and DO. The audience is riveted! \u003Ca href=\"https://t.co/ibao6ngeNX\">pic.twitter.com/ibao6ngeNX\u003C/a>\u003C/p>&mdash; Priyanka Sharma @ #GitLabCommit Brooklyn! (@pritianka) \u003Ca href=\"https://twitter.com/pritianka/status/1173972101713276928?ref_src=twsrc%5Etfw\">September 17, 2019\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nThis was not anyone's typical idea of a user conference: no large, impersonal hotel, no pre-fab food, and no stilted conversations with total strangers. No one spent the day in frigid air conditioning. Instead everyone moved seamlessly from space to space, inside and outside, and it really was refreshing.\n\nLunch was refreshing too. 
It's not every day a gorilla brings you grilled cheese and tater tots under sunny skies.\n\n![Gorilla Grilled Cheese](https://about.gitlab.com/images/blogimages/commitbrooklyngorilla.jpg){: .shadow.small.center}\nThis was some grilled cheese!\n{: .note.text-center}\n\nAfter lunch, some people met up with our CEO [Sid Sijbrandij](/company/team/#sytses) while others attended individual tracks.\n\n![Office hours with Sid](https://about.gitlab.com/images/blogimages/commitbrooklynsid.jpg){: .shadow.small.center}\nMeet the CEO!\n{: .note.text-center}\n\nAn open coffee and tea bar (we took over the local coffee shop and my iced chai latte was delicious) fueled lots of conversations about the challenges we all face around DevOps.\n\n![iced chai](https://about.gitlab.com/images/blogimages/commitbrooklynchai.jpg){: .shadow.small.center}\nCheers!\n{: .note.text-center}\n\nAnd then it was time to, well, bowl.\n\n![Bowling](https://about.gitlab.com/images/blogimages/commitbrooklynbowling.jpg){: .shadow.small.center}\nGitLab at Brooklyn Bowl\n{: .note.text-center}\n\nIt might be bragging, but we really do throw a great party (and user conference, for that matter).\n\nIf you'd like to see for yourself, you'll have another chance to network with others on the same DevOps journey. Get your tickets to [Commit London on October 9](/events/commit/#). 
You can also read about news from Commit: [$268 million in Series E funding, new partners, and more](/blog/live-from-commit-news/), and check out the highlight reel below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/hi2D0Se_VnA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003C%= partial \"includes/blog/blog-merch-banner\" %>\n",[109,856,279,9,1514],{"slug":1890,"featured":6,"template":700},"wrapping-up-commit","content:en-us:blog:wrapping-up-commit.yml","Wrapping Up Commit","en-us/blog/wrapping-up-commit.yml","en-us/blog/wrapping-up-commit",{"_path":1896,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1897,"content":1903,"config":1908,"_id":1910,"_type":14,"title":1911,"_source":16,"_file":1912,"_stem":1913,"_extension":19},"/en-us/blog/a-beginners-guide-to-continuous-integration",{"title":1898,"description":1899,"ogTitle":1898,"ogDescription":1899,"noIndex":6,"ogImage":1900,"ogUrl":1901,"ogSiteName":686,"ogType":687,"canonicalUrls":1901,"schema":1902},"A beginner's guide to continuous integration","Here's how to help everyone on your team, like designers and testers, get started with GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679800/Blog/Hero%20Images/beginners-guide-to-ci.jpg","https://about.gitlab.com/blog/a-beginners-guide-to-continuous-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A beginner's guide to continuous integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Riccardo Padovani\"}],\n        \"datePublished\": \"2018-01-22\",\n      }",{"title":1898,"description":1899,"authors":1904,"heroImage":1900,"date":1905,"body":1906,"category":741,"tags":1907},[1264],"2018-01-22","\n\nAt [fleetster](https://www.fleetster.net/) we have our own instance of GitLab and we rely a 
lot on [GitLab CI/CD](/topics/ci-cd/). Also our designers and QA guys use (and love) it, thanks to its advanced features.\n\n\u003C!-- more -->\n\nGitLab CI/CD is a very powerful system of [continuous integration (CI)](/solutions/continuous-integration/), with a lot of different features, and with every new release, new features land. It has very rich [technical documentation](https://docs.gitlab.com/ee/ci/), but it lacks a generic introduction for people who want to use it in an existing setup. A designer or a tester doesn’t need to know how to autoscale it with [Kubernetes](/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab/) or the difference between an image or a service.\n\nBut still, they need to [know what a pipeline is](/topics/ci-cd/cicd-pipeline/), and how to see a branch deployed to an environment. In this article therefore I will try to cover as many features as possible, highlighting how the end users can enjoy them; in the last months I explained such features to some members of our team, also developers: not everyone knows what continuous integration is or has used Gitlab CI/CD in a previous job.\n\nIf you want to know why continuous integration is important I suggest reading [this article](/blog/7-reasons-why-you-should-be-using-ci/), while for finding the reasons for using Gitlab CI/CD specifically, I leave the job to [GitLab](/solutions/continuous-integration/) itself.\n\n## Introduction\n\nEvery time developers change some code they save their changes in a commit. They can then push that commit to GitLab, so other developers can review the code.\n\nGitLab will also start some work on that commit, if GitLab CI/CD has been configured. This work is executed by a runner. 
A runner is basically a server (it can be a lot of different things, also your PC, but we can simplify it as a server) that executes instructions listed in the `.gitlab-ci.yml` file, and reports the result back to GitLab itself, which will show it in his graphical interface.\n\nWhen developers have finished implementing a new feature or a bugfix (activity that usual requires multiple commits), they can open a merge request, where other members of the team can comment on the code and on the implementation.\n\nAs we will see, designers and testers can also (and really should!) join this process, giving feedback and suggesting improvements, especially thanks to two features of GitLab CI: environments and artifacts.\n\n## CI/CD pipelines\n\nEvery commit that is pushed to GitLab generates a pipeline attached to that commit. If multiple commits are pushed together the pipeline will be created for the last one only. A pipeline is a collection of jobs split in different stages.\n\nAll the jobs in the same stage run concurrently (if there are enough runners) and the next stage begins only if all the jobs from the previous stage have finished with success.\n\nAs soon as a job fails, the entire pipeline fails. There is an exception for this, as we will see below: if a job is marked as manual, then a failure will not make the pipeline fail.\n\nThe stages are just a logical division between batches of jobs, where it doesn’t make sense to execute the next job if the previous failed. We can have a `build` stage, where all the jobs to build the application are executed, and a `deploy` stage, where the build application is deployed. 
Doesn’t make much sense to deploy something that failed to build, does it?\n\nEvery job shouldn’t have any dependency with any other job in the same stage, while they can expect results by jobs from a previous stage.\n\nLet’s see how GitLab shows information about stages and stages’ status.\n\n\u003Cimg src=\"/images/blogimages/pipeline-overview.png\" alt=\"Pipeline overview\" style=\"width: 700px;\"/>{: .shadow}\n\n\u003Cimg src=\"/images/blogimages/pipeline-status.png\" alt=\"Pipeline status\" style=\"width: 700px;\"/>{: .shadow}\n\n## What is a CI job?\n\nA job is a collection of instructions that a runner has to execute. You can see in real time what the output of the job is, so developers can understand why a job fails.\n\nA job can be automatic, so it starts automatically when a commit is pushed, or manual. A manual job has to be triggered by someone manually. This can be useful, for example, to automate a deploy, but still to deploy only when someone manually approves it. There is a way to limit who can run a job, so only trustworthy people can deploy, to continue the example before.\n\nA job can also build artifacts that users can download, like it creates an APK you can download and test on your device; in this way both designers and testers can download an application and test it without having to ask for help to developers.\n\nOther than creating artifacts, a job can deploy an environment, usually reachable by an URL, where users can test the commit.\n\nJob status are the same as stages status: indeed stages inherit theirs status from the jobs.\n\n\u003Cimg src=\"/images/blogimages/running-job.png\" alt=\"Running job\" style=\"width: 700px;\"/>{: .shadow}\n\n## Artifacts\n\nAs we said, a job can create an artifact that users can download to test. 
It can be anything, like an application for Windows, an image generated by a PC, or an APK for Android.\n\nSo you are a designer, and the merge request has been assigned to you: you need to validate the implementation of the new design!\n\nBut how to do that?\n\nYou need to open the merge request, and download the artifact, as shown in the figure.\n\nEvery pipeline collects all the artifacts from all the jobs, and every job can have multiple artifacts. When you click on the download button, a dropdown will appear where you can select which artifact you want. After the review, you can leave a comment on the MR.\n\nYou can also always download the artifacts from pipelines that do not have a merge request open ;-)\n\nI am focusing on merge requests because usually that is where testers, designers, and shareholders in general enter the workflow.\n\nBut merge requests are not linked to pipelines: while they integrate nicely with one another, they do not have any relation.\n\n\u003Cimg src=\"/images/blogimages/download-artifacts.png\" alt=\"Download artifacts\" style=\"width: 700px;\"/>{: .shadow}\n\n## CI/CD environments\n\nIn a similar way, a job can deploy something to an external server, so you can reach it through the merge request itself.\n\nAs you can see, the environment has a name and a link. Just by clicking the link you to go to a deployed version of your application (of course, if your team has set it up correctly).\n\nYou can also click on the name of the environment, because GitLab also has other cool features for environments, like [monitoring](https://gitlab.com/help/ci/environments.md).\n\n\u003Cimg src=\"/images/blogimages/environment.png\" alt=\"environment\" style=\"width: 700px;\"/>{: .shadow}\n\n## Conclusion\n\nThis was a small introduction to some of the features of GitLab CI: it is very powerful, and using it in the right way allows all the team to use just one tool to go from planning to deploying. 
A lot of new features are introduced every month, so keep an eye on the [GitLab blog](/blog/).\n\nFor setting it up, or for more advanced features, take a look at the [documentation](https://docs.gitlab.com/ee/ci/).\n\nIn fleetster we use it not only for running tests, but also for having automatic versioning of the software and automatic deploys to testing environments. We have automated other jobs as well (building apps and publishing them on the Play Store and so on).\n\n\n## About the guest author\n\nRiccardo is a university student and a part-time developer at [fleetster](https://www.fleetster.net/). When not busy with university or work, he likes to contribute to open source projects.\n\n *[An introduction to continuous integration](https://rpadovani.com/introduction-gitlab-ci) was originally published on rpadovani.com.*\n\n*Cover photo by [Mike Tinnion](https://unsplash.com/photos/3ym6i13Y9LU?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)*\n{: .note}\n",[109,9],{"slug":1909,"featured":6,"template":700},"a-beginners-guide-to-continuous-integration","content:en-us:blog:a-beginners-guide-to-continuous-integration.yml","A Beginners Guide To Continuous Integration","en-us/blog/a-beginners-guide-to-continuous-integration.yml","en-us/blog/a-beginners-guide-to-continuous-integration",7,[679,705,728,750,770,791,815,840,863],1758326264989]