[{"data":1,"prerenderedAt":1992},["ShallowReactive",2],{"/en-us/blog/tags/cd/":3,"navigation-en-us":20,"banner-en-us":450,"footer-en-us":467,"CD-tag-page-en-us":677},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":11,"_id":13,"_type":14,"title":15,"_source":16,"_file":17,"_stem":18,"_extension":19},"/en-us/blog/tags/cd","tags",false,"",{"tag":9,"tagSlug":10},"CD","cd",{"template":12},"BlogTag","content:en-us:blog:tags:cd.yml","yaml","Cd","content","en-us/blog/tags/cd.yml","en-us/blog/tags/cd","yml",{"_path":21,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":446,"_type":14,"title":447,"_source":16,"_file":448,"_stem":449,"_extension":19},"/shared/en-us/main-navigation","en-us",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":377,"minimal":408,"duo":427,"pricingDeployment":436},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,187,192,298,358],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":60,"config":61},"Meet GitLab Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo ai",{"title":65,"description":66,"link":67},"Why 
GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":169},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,124,148],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[108,112,116,120],{"text":109,"config":110},"CI/CD",{"href":111,"dataGaLocation":28,"dataGaName":109},"/solutions/continuous-integration/",{"text":113,"config":114},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":115},"AI assisted development",{"text":117,"config":118},"Source Code Management",{"href":119,"dataGaLocation":28,"dataGaName":117},"/solutions/source-code-management/",{"text":121,"config":122},"Automated Software Delivery",{"href":105,"dataGaLocation":28,"dataGaName":123},"Automated software delivery",{"title":125,"description":126,"link":127,"items":132},"Security","Deliver code faster without compromising security",{"config":128},{"href":129,"dataGaName":130,"dataGaLocation":28,"icon":131},"/solutions/security-compliance/","security and 
compliance","ShieldCheckLight",[133,138,143],{"text":134,"config":135},"Application Security Testing",{"href":136,"dataGaName":137,"dataGaLocation":28},"/solutions/application-security-testing/","Application security testing",{"text":139,"config":140},"Software Supply Chain Security",{"href":141,"dataGaLocation":28,"dataGaName":142},"/solutions/supply-chain/","Software supply chain security",{"text":144,"config":145},"Software Compliance",{"href":146,"dataGaName":147,"dataGaLocation":28},"/solutions/software-compliance/","software compliance",{"title":149,"link":150,"items":155},"Measurement",{"config":151},{"icon":152,"href":153,"dataGaName":154,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[156,160,164],{"text":157,"config":158},"Visibility & Measurement",{"href":153,"dataGaLocation":28,"dataGaName":159},"Visibility and Measurement",{"text":161,"config":162},"Value Stream Management",{"href":163,"dataGaLocation":28,"dataGaName":161},"/solutions/value-stream-management/",{"text":165,"config":166},"Analytics & Insights",{"href":167,"dataGaLocation":28,"dataGaName":168},"/solutions/analytics-and-insights/","Analytics and insights",{"title":170,"items":171},"GitLab for",[172,177,182],{"text":173,"config":174},"Enterprise",{"href":175,"dataGaLocation":28,"dataGaName":176},"/enterprise/","enterprise",{"text":178,"config":179},"Small Business",{"href":180,"dataGaLocation":28,"dataGaName":181},"/small-business/","small business",{"text":183,"config":184},"Public Sector",{"href":185,"dataGaLocation":28,"dataGaName":186},"/solutions/public-sector/","public sector",{"text":188,"config":189},"Pricing",{"href":190,"dataGaName":191,"dataGaLocation":28,"dataNavLevelOne":191},"/pricing/","pricing",{"text":193,"config":194,"link":196,"lists":200,"feature":285},"Resources",{"dataNavLevelOne":195},"resources",{"text":197,"config":198},"View all 
resources",{"href":199,"dataGaName":195,"dataGaLocation":28},"/resources/",[201,234,257],{"title":202,"items":203},"Getting started",[204,209,214,219,224,229],{"text":205,"config":206},"Install",{"href":207,"dataGaName":208,"dataGaLocation":28},"/install/","install",{"text":210,"config":211},"Quick start guides",{"href":212,"dataGaName":213,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":215,"config":216},"Learn",{"href":217,"dataGaLocation":28,"dataGaName":218},"https://university.gitlab.com/","learn",{"text":220,"config":221},"Product documentation",{"href":222,"dataGaName":223,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":225,"config":226},"Best practice videos",{"href":227,"dataGaName":228,"dataGaLocation":28},"/getting-started-videos/","best practice videos",{"text":230,"config":231},"Integrations",{"href":232,"dataGaName":233,"dataGaLocation":28},"/integrations/","integrations",{"title":235,"items":236},"Discover",[237,242,247,252],{"text":238,"config":239},"Customer success stories",{"href":240,"dataGaName":241,"dataGaLocation":28},"/customers/","customer success stories",{"text":243,"config":244},"Blog",{"href":245,"dataGaName":246,"dataGaLocation":28},"/blog/","blog",{"text":248,"config":249},"Remote",{"href":250,"dataGaName":251,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":253,"config":254},"TeamOps",{"href":255,"dataGaName":256,"dataGaLocation":28},"/teamops/","teamops",{"title":258,"items":259},"Connect",[260,265,270,275,280],{"text":261,"config":262},"GitLab 
Services",{"href":263,"dataGaName":264,"dataGaLocation":28},"/services/","services",{"text":266,"config":267},"Community",{"href":268,"dataGaName":269,"dataGaLocation":28},"/community/","community",{"text":271,"config":272},"Forum",{"href":273,"dataGaName":274,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":276,"config":277},"Events",{"href":278,"dataGaName":279,"dataGaLocation":28},"/events/","events",{"text":281,"config":282},"Partners",{"href":283,"dataGaName":284,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":286,"textColor":287,"text":288,"image":289,"link":293},"#2f2a6b","#fff","Insights for the future of software development",{"altText":290,"config":291},"the source promo card",{"src":292},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758208064/dzl0dbift9xdizyelkk4.svg",{"text":294,"config":295},"Read the latest",{"href":296,"dataGaName":297,"dataGaLocation":28},"/the-source/","the source",{"text":299,"config":300,"lists":302},"Company",{"dataNavLevelOne":301},"company",[303],{"items":304},[305,310,316,318,323,328,333,338,343,348,353],{"text":306,"config":307},"About",{"href":308,"dataGaName":309,"dataGaLocation":28},"/company/","about",{"text":311,"config":312,"footerGa":315},"Jobs",{"href":313,"dataGaName":314,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":314},{"text":276,"config":317},{"href":278,"dataGaName":279,"dataGaLocation":28},{"text":319,"config":320},"Leadership",{"href":321,"dataGaName":322,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":324,"config":325},"Team",{"href":326,"dataGaName":327,"dataGaLocation":28},"/company/team/","team",{"text":329,"config":330},"Handbook",{"href":331,"dataGaName":332,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":334,"config":335},"Investor relations",{"href":336,"dataGaName":337,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":339,"config":340},"Trust 
Center",{"href":341,"dataGaName":342,"dataGaLocation":28},"/security/","trust center",{"text":344,"config":345},"AI Transparency Center",{"href":346,"dataGaName":347,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":349,"config":350},"Newsletter",{"href":351,"dataGaName":352,"dataGaLocation":28},"/company/contact/","newsletter",{"text":354,"config":355},"Press",{"href":356,"dataGaName":357,"dataGaLocation":28},"/press/","press",{"text":359,"config":360,"lists":361},"Contact us",{"dataNavLevelOne":301},[362],{"items":363},[364,367,372],{"text":35,"config":365},{"href":37,"dataGaName":366,"dataGaLocation":28},"talk to sales",{"text":368,"config":369},"Get help",{"href":370,"dataGaName":371,"dataGaLocation":28},"/support/","get help",{"text":373,"config":374},"Customer portal",{"href":375,"dataGaName":376,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":378,"login":379,"suggestions":386},"Close",{"text":380,"link":381},"To search repositories and projects, login to",{"text":382,"config":383},"gitlab.com",{"href":42,"dataGaName":384,"dataGaLocation":385},"search login","search",{"text":387,"default":388},"Suggestions",[389,391,395,397,401,405],{"text":57,"config":390},{"href":62,"dataGaName":57,"dataGaLocation":385},{"text":392,"config":393},"Code Suggestions (AI)",{"href":394,"dataGaName":392,"dataGaLocation":385},"/solutions/code-suggestions/",{"text":109,"config":396},{"href":111,"dataGaName":109,"dataGaLocation":385},{"text":398,"config":399},"GitLab on AWS",{"href":400,"dataGaName":398,"dataGaLocation":385},"/partners/technology-partners/aws/",{"text":402,"config":403},"GitLab on Google Cloud",{"href":404,"dataGaName":402,"dataGaLocation":385},"/partners/technology-partners/google-cloud-platform/",{"text":406,"config":407},"Why 
GitLab?",{"href":70,"dataGaName":406,"dataGaLocation":385},{"freeTrial":409,"mobileIcon":414,"desktopIcon":419,"secondaryButton":422},{"text":410,"config":411},"Start free trial",{"href":412,"dataGaName":33,"dataGaLocation":413},"https://gitlab.com/-/trials/new/","nav",{"altText":415,"config":416},"Gitlab Icon",{"src":417,"dataGaName":418,"dataGaLocation":413},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203874/jypbw1jx72aexsoohd7x.svg","gitlab icon",{"altText":415,"config":420},{"src":421,"dataGaName":418,"dataGaLocation":413},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203875/gs4c8p8opsgvflgkswz9.svg",{"text":423,"config":424},"Get Started",{"href":425,"dataGaName":426,"dataGaLocation":413},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":428,"mobileIcon":432,"desktopIcon":434},{"text":429,"config":430},"Learn more about GitLab Duo",{"href":62,"dataGaName":431,"dataGaLocation":413},"gitlab duo",{"altText":415,"config":433},{"src":417,"dataGaName":418,"dataGaLocation":413},{"altText":415,"config":435},{"src":421,"dataGaName":418,"dataGaLocation":413},{"freeTrial":437,"mobileIcon":442,"desktopIcon":444},{"text":438,"config":439},"Back to pricing",{"href":190,"dataGaName":440,"dataGaLocation":413,"icon":441},"back to pricing","GoBack",{"altText":415,"config":443},{"src":417,"dataGaName":418,"dataGaLocation":413},{"altText":415,"config":445},{"src":421,"dataGaName":418,"dataGaLocation":413},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":451,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"title":452,"button":453,"image":458,"config":462,"_id":464,"_type":14,"_source":16,"_file":465,"_stem":466,"_extension":19},"/shared/en-us/banner","is now in public beta!",{"text":454,"config":455},"Try the 
Beta",{"href":456,"dataGaName":457,"dataGaLocation":28},"/gitlab-duo/agent-platform/","duo banner",{"altText":459,"config":460},"GitLab Duo Agent Platform",{"src":461},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":463},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":468,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":469,"_id":673,"_type":14,"title":674,"_source":16,"_file":675,"_stem":676,"_extension":19},"/shared/en-us/main-footer",{"text":470,"source":471,"edit":477,"contribute":482,"config":487,"items":492,"minimal":665},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":472,"config":473},"View page source",{"href":474,"dataGaName":475,"dataGaLocation":476},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":478,"config":479},"Edit this page",{"href":480,"dataGaName":481,"dataGaLocation":476},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":483,"config":484},"Please contribute",{"href":485,"dataGaName":486,"dataGaLocation":476},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":488,"facebook":489,"youtube":490,"linkedin":491},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[493,516,572,601,635],{"title":46,"links":494,"subMenu":499},[495],{"text":496,"config":497},"DevSecOps platform",{"href":55,"dataGaName":498,"dataGaLocation":476},"devsecops platform",[500],{"title":188,"links":501},[502,506,511],{"text":503,"config":504},"View plans",{"href":190,"dataGaName":505,"dataGaLocation":476},"view plans",{"text":507,"config":508},"Why 
Premium?",{"href":509,"dataGaName":510,"dataGaLocation":476},"/pricing/premium/","why premium",{"text":512,"config":513},"Why Ultimate?",{"href":514,"dataGaName":515,"dataGaLocation":476},"/pricing/ultimate/","why ultimate",{"title":517,"links":518},"Solutions",[519,524,526,528,533,538,542,545,549,554,556,559,562,567],{"text":520,"config":521},"Digital transformation",{"href":522,"dataGaName":523,"dataGaLocation":476},"/topics/digital-transformation/","digital transformation",{"text":134,"config":525},{"href":136,"dataGaName":134,"dataGaLocation":476},{"text":123,"config":527},{"href":105,"dataGaName":106,"dataGaLocation":476},{"text":529,"config":530},"Agile development",{"href":531,"dataGaName":532,"dataGaLocation":476},"/solutions/agile-delivery/","agile delivery",{"text":534,"config":535},"Cloud transformation",{"href":536,"dataGaName":537,"dataGaLocation":476},"/topics/cloud-native/","cloud transformation",{"text":539,"config":540},"SCM",{"href":119,"dataGaName":541,"dataGaLocation":476},"source code management",{"text":109,"config":543},{"href":111,"dataGaName":544,"dataGaLocation":476},"continuous integration & delivery",{"text":546,"config":547},"Value stream management",{"href":163,"dataGaName":548,"dataGaLocation":476},"value stream management",{"text":550,"config":551},"GitOps",{"href":552,"dataGaName":553,"dataGaLocation":476},"/solutions/gitops/","gitops",{"text":173,"config":555},{"href":175,"dataGaName":176,"dataGaLocation":476},{"text":557,"config":558},"Small business",{"href":180,"dataGaName":181,"dataGaLocation":476},{"text":560,"config":561},"Public sector",{"href":185,"dataGaName":186,"dataGaLocation":476},{"text":563,"config":564},"Education",{"href":565,"dataGaName":566,"dataGaLocation":476},"/solutions/education/","education",{"text":568,"config":569},"Financial services",{"href":570,"dataGaName":571,"dataGaLocation":476},"/solutions/finance/","financial 
services",{"title":193,"links":573},[574,576,578,580,583,585,587,589,591,593,595,597,599],{"text":205,"config":575},{"href":207,"dataGaName":208,"dataGaLocation":476},{"text":210,"config":577},{"href":212,"dataGaName":213,"dataGaLocation":476},{"text":215,"config":579},{"href":217,"dataGaName":218,"dataGaLocation":476},{"text":220,"config":581},{"href":222,"dataGaName":582,"dataGaLocation":476},"docs",{"text":243,"config":584},{"href":245,"dataGaName":246,"dataGaLocation":476},{"text":238,"config":586},{"href":240,"dataGaName":241,"dataGaLocation":476},{"text":248,"config":588},{"href":250,"dataGaName":251,"dataGaLocation":476},{"text":261,"config":590},{"href":263,"dataGaName":264,"dataGaLocation":476},{"text":253,"config":592},{"href":255,"dataGaName":256,"dataGaLocation":476},{"text":266,"config":594},{"href":268,"dataGaName":269,"dataGaLocation":476},{"text":271,"config":596},{"href":273,"dataGaName":274,"dataGaLocation":476},{"text":276,"config":598},{"href":278,"dataGaName":279,"dataGaLocation":476},{"text":281,"config":600},{"href":283,"dataGaName":284,"dataGaLocation":476},{"title":299,"links":602},[603,605,607,609,611,613,615,619,624,626,628,630],{"text":306,"config":604},{"href":308,"dataGaName":301,"dataGaLocation":476},{"text":311,"config":606},{"href":313,"dataGaName":314,"dataGaLocation":476},{"text":319,"config":608},{"href":321,"dataGaName":322,"dataGaLocation":476},{"text":324,"config":610},{"href":326,"dataGaName":327,"dataGaLocation":476},{"text":329,"config":612},{"href":331,"dataGaName":332,"dataGaLocation":476},{"text":334,"config":614},{"href":336,"dataGaName":337,"dataGaLocation":476},{"text":616,"config":617},"Sustainability",{"href":618,"dataGaName":616,"dataGaLocation":476},"/sustainability/",{"text":620,"config":621},"Diversity, inclusion and belonging (DIB)",{"href":622,"dataGaName":623,"dataGaLocation":476},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":339,"config":625},{"href":341,"dataGaName":342,"dataGaLocation":476},{"text":349,"config":627},{"href":351,"dataGaName":352,"dataGaLocation":476},{"text":354,"config":629},{"href":356,"dataGaName":357,"dataGaLocation":476},{"text":631,"config":632},"Modern Slavery Transparency Statement",{"href":633,"dataGaName":634,"dataGaLocation":476},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":636,"links":637},"Contact Us",[638,641,643,645,650,655,660],{"text":639,"config":640},"Contact an expert",{"href":37,"dataGaName":38,"dataGaLocation":476},{"text":368,"config":642},{"href":370,"dataGaName":371,"dataGaLocation":476},{"text":373,"config":644},{"href":375,"dataGaName":376,"dataGaLocation":476},{"text":646,"config":647},"Status",{"href":648,"dataGaName":649,"dataGaLocation":476},"https://status.gitlab.com/","status",{"text":651,"config":652},"Terms of use",{"href":653,"dataGaName":654,"dataGaLocation":476},"/terms/","terms of use",{"text":656,"config":657},"Privacy statement",{"href":658,"dataGaName":659,"dataGaLocation":476},"/privacy/","privacy statement",{"text":661,"config":662},"Cookie preferences",{"dataGaName":663,"dataGaLocation":476,"id":664,"isOneTrustButton":91},"cookie preferences","ot-sdk-btn",{"items":666},[667,669,671],{"text":651,"config":668},{"href":653,"dataGaName":654,"dataGaLocation":476},{"text":656,"config":670},{"href":658,"dataGaName":659,"dataGaLocation":476},{"text":661,"config":672},{"dataGaName":663,"dataGaLocation":476,"id":664,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":678,"featuredPost":1969,"totalPagesCount":1990,"initialPosts":1991},[679,706,728,751,773,796,818,838,863,884,903,924,945,966,987,1008,1029,1049,1071,1090,1111,1132,1154,1175,1195,1214,1233,1253,1273,1294,1313,1334,1354,1374,1393,1413,1435,1455,1473,1493,1513,1531,1550,1570,1590,1609,1629,1649,1668,1687,1708,1727,1749,1769,1790,1811,1830,1848,1868,1888,1909,1928,1950],{"_path":680,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":681,"content":689,"config":699,"_id":702,"_type":14,"title":703,"_source":16,"_file":704,"_stem":705,"_extension":19},"/en-us/blog/13-0-contributor-experience-update",{"title":682,"description":683,"ogTitle":682,"ogDescription":683,"noIndex":6,"ogImage":684,"ogUrl":685,"ogSiteName":686,"ogType":687,"canonicalUrls":685,"schema":688},"13.0 Contributor Experience Update","We're continually improving the tooling that helps our community of contributors build GitLab. Here's what's new over the last month!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669604/Blog/Hero%20Images/tanuki-bg-sm.gif","https://about.gitlab.com/blog/13-0-contributor-experience-update","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"13.0 Contributor Experience Update\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Patrick Deuley\"},{\"@type\":\"Person\",\"name\":\"Taurie Davis\"}],\n        \"datePublished\": \"2020-06-01\",\n      }",{"title":682,"description":683,"authors":690,"heroImage":684,"date":693,"body":694,"category":695,"tags":696},[691,692],"Patrick Deuley","Taurie Davis","2020-06-01","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nThe Ecosystem group is the home of the [Pajamas Design System](https://design.gitlab.com) and the [GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit). 
Our job is to help you as a contributor work on GitLab, whether that's writing backend code, creating awesome new features in our UI, or testing feature branches to validate new work that's about to go in.\n\nThis is a collection of all the updates from the last month, including changes to Pajamas, new components in GitLab UI, new icons, new commands in the GDK, and more! Check it out below, and as always, please feel free to send us issues for new things we could improve or add. You can also find us in [#g_ecosystem](https://gitlab.slack.com/archives/CK4Q4709G) (for general inquiries), [#g_manage_foundations](https://gitlab.slack.com/archives/C010NAWPRV4) (for Pajamas and GitLab-UI), or [#gdk](https://gitlab.slack.com/archives/C2Z9A056E). 🎉\n\n\n## Pajamas\n\n* We introduced [semantic releases](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/merge_requests/1866) and [conventional commits](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/merge_requests/1863). \n* We updated our [component status table](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/merge_requests/1878) to include different touchpoints. 
This includes Figma, Usage docs, Vue docs, GitLab UI state, and Accessibility audit statuses.\n\n## Usage guidelines\n\n* We added first usage guideline iterations for [Progress Bars](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/merge_requests/1886) and [File Uploaders](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/merge_requests/1799).\n* We also clarified the [position of buttons in alerts](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/merge_requests/1889), as well as added [dismissal guidelines for banners](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/merge_requests/1893).\n* We introduced documentation for our new [data visualization color palette](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/merge_requests/1831).\n\n_[Check out the changelog](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/blob/main/CHANGELOG.md) for more improvements._\n\n## GitLab UI\n\n* Within GitLab UI, we introduced [UX Reviewers](https://gitlab.com/gitlab-org/gitlab-ui/-/merge_requests/1358) to our review roulette bot.\n* We also started laying the foundation for using GitLab UI in our Docs as part of our [Beautifying the Docs](https://gitlab.com/groups/gitlab-org/-/epics/3063) effort.\n\n## Component updates\n\n* Our [markdown typescale](https://gitlab.com/gitlab-org/gitlab-ui/-/merge_requests/1201) is now viewable with compact markdown coming soon.\n* We've [removed the documentation link from scoped labels](https://gitlab.com/gitlab-org/gitlab-ui/-/merge_requests/1259), [standardized alert styling](https://gitlab.com/gitlab-org/gitlab-ui/-/merge_requests/1344), added [filters](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/merge_requests/1897) as built and ready to use, fixed [dropdown styles](https://gitlab.com/gitlab-org/gitlab-ui/-/merge_requests/1322), and introduced the [loading 
button](https://gitlab.com/gitlab-org/gitlab-ui/-/merge_requests/1308). We also clarified that [filter and sorting functionality shouldn't be combined in the same component](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/merge_requests/1879).\n\n_[Check out the changelog](https://gitlab.com/gitlab-org/gitlab-ui/-/blob/master/CHANGELOG.md) for more improvements._\n\n## Figma migration\n\n* New components have been added to Figma!\n  * [Token](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/merge_requests/1917)\n  * [Sorting](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/merge_requests/1890)\n  * [Tree](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/merge_requests/1896)\n* We [deprecated the Sketch UI Kit](https://gitlab.com/gitlab-org/gitlab-design/-/merge_requests/243) in favor of the [Pajamas UI Kit](https://www.figma.com/community/file/781156790581391771) in Figma. We also determined [Project and Team library structure in Figma](https://gitlab.com/gitlab-org/gitlab-design/-/merge_requests/247/diffs) and added [guidance on collaborating within Figma vs. 
GitLab's Design Management](https://gitlab.com/gitlab-org/gitlab-design/-/merge_requests/248)\n\n# Icons\n\n* Issue templates for adding new [icons](https://gitlab.com/gitlab-org/gitlab-svgs/-/merge_requests/539) and [illustrations](https://gitlab.com/gitlab-org/gitlab-svgs/-/merge_requests/541) have been moved from [GitLab Design](https://gitlab.com/gitlab-org/gitlab-design) to [GitLab SVGs](https://gitlab.com/gitlab-org/gitlab-svgs).\n* The following icons were added to GitLab SVG:\n  * [Project, group, and subgroup](https://gitlab.com/gitlab-org/gitlab-svgs/-/merge_requests/562)\n  * [Expire](https://gitlab.com/gitlab-org/gitlab-svgs/-/merge_requests/564)\n  * [Container image](https://gitlab.com/gitlab-org/gitlab-svgs/-/merge_requests/566)\n\n# GDK\n\n* [GDK doctor](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&label_name[]=gdk-doctor&milestone_title=13.0) now checks a bunch of Ruby gems that have C extensions that are known to cause issues\n* There's [a new `quiet` mode](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/1267) that reduces the \"noise\" during the bundle installation. This should make it more obvious when actionable things happen. Try it out and let us know if this is helpful!\n\n## Documentation\n* We added some [general commands](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/master/doc/troubleshooting.md#troubleshooting) that are good to know when troubleshooting GDK errors.\n* We've [restructured the documentation](https://gitlab.com/gitlab-org/gitlab-development-kit/-/tree/master/doc) to make it easier to navigate! This includes moving non-core content into `doc/howto` to centralize specific how to docs in one place.\n* There were also [24 other documentation improvements](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&label_name[]=documentation&milestone_title=13.0)!! 
🎉\n* [YARD documentation](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/1213) is now automatically generated and accessible at https://gitlab-org.gitlab.io/gitlab-development-kit/\n* [Webpack is now able to be run in different modes](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/1201/)\n* There's now a doc on how to [configure a hosted Grafana instance](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/master/doc/howto/prometheus/third_party_access.md#configuring-a-hosted-grafana-instance)\n\n## CI\n  - All [shell scripts](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/1190)  within the project are now checked\n  - Documentation linted even further with the [introduction of Vale](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/1193)\n  - Ruby code is now [checked with RuboCop](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/1206)\n  - CI config was [split up into templates](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/1229)\n  - CI jobs are now [interruptible by default](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/1256)\n\n## Misc\n  - [Sidekiq logging now JSON by default](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/1198)\n\n","unfiltered",[697,698,9],"contributors","careers",{"slug":700,"featured":6,"template":701},"13-0-contributor-experience-update","BlogPost","content:en-us:blog:13-0-contributor-experience-update.yml","13 0 Contributor Experience 
Update","en-us/blog/13-0-contributor-experience-update.yml","en-us/blog/13-0-contributor-experience-update",{"_path":707,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":708,"content":714,"config":722,"_id":724,"_type":14,"title":725,"_source":16,"_file":726,"_stem":727,"_extension":19},"/en-us/blog/a-ci-component-builders-journey",{"title":709,"description":710,"ogTitle":709,"ogDescription":710,"noIndex":6,"ogImage":711,"ogUrl":712,"ogSiteName":686,"ogType":687,"canonicalUrls":712,"schema":713},"A CI/CD component builder's journey","Learn how a creator of shared, includable templates upskilled by migrating the templates to GitLab CI/CD components and the CI/CD Catalog.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663857/Blog/Hero%20Images/blog-image-template-1800x945__12_.png","https://about.gitlab.com/blog/a-ci-component-builders-journey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A CI/CD component builder's journey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2024-06-04\",\n      }",{"title":709,"description":710,"authors":715,"heroImage":711,"date":717,"body":718,"category":719,"tags":720},[716],"Darwin Sanoy","2024-06-04","I've always found it fascinating that my father, a heavy-duty mechanic by trade, would make his own tools for challenging jobs for which his industry had not yet built a fit-to-purpose tool. Little did I realize I'd become a tool builder in IT, which has been one of my loves for many years now.\n\nI have been building GitLab CI/CD includable, shared templates since starting with GitLab over four years ago. 
They were designed in a specific way for others to depend directly on them – similar to the dependency managers you see in application languages like Node.js NPM, Python Pypi, and .NET NuGet.\n\nGitLab itself has had long experience in building these shared CI dependencies through [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) and all of our security scanning suite of tools.\n\nWith the introduction of [GitLab CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/), this long-running approach is formalized into a way for everyone to publish GitLab CI/CD components for use by anyone in the world.\n\nSome of they key upgrades compared to the shared templates approach include:\n\n- **Independent component versions** are a new versioning mechanism that no longer relies on inheriting containers versions. GitLab CI/CD component versions bundle together the CI code and any number of containers (or no containers) behind a single CI/CD component version. The concepts of stable, production-grade DevSecOps require the ability to peg dependency versions in automation – for exactly the same reasons and benefits that this is done in production-grade application code.\n\n- **Global visibility (with control)** is available through the catalog at GitLab.com (or global to your company on a self-managed instance). Individual component visibility is also subject to the security settings of it's source project - so you can publish components to secure groups.\n\n- **Catalog metadata,** like most code-sharing mechanisms, is needed data to make decisions about which components to use.\n\n## Let's show some code\n\nI much prefer to show than tell, so let's look at a few component examples - all of which also publish their sources publicly (click on the title to access the component).\n\n### 1. 
[Hello World](https://gitlab.com/explore/catalog/guided-explorations/ci-components/hello-world)\nI noticed that there was not yet a Hello World component that could show the minimum viable component, both the results and the source. This particular example shows how to \"componentize\" just CI code.\n\n### 2. [Hello World Container](https://gitlab.com/explore/catalog/guided-explorations/ci-components/hello-world-container)\nFrequently a CI/CD component will require a container to be fully functional. This example includes a container that is published in the same project as the component itself.\n\n### 3. [GitVersion Ultimate Auto Semversioning](https://gitlab.com/explore/catalog/guided-explorations/ci-components/ultimate-auto-semversioning)\n\nThis component automates the venerable \"GitVersion\" utility, which completely automates selecting the next semversion for your software without having to store the last version – even for busy repositories where many production-possible candidates are being worked on at once. One of the building principles this component follows is the principle of \"least configuration\" or \"default to doing the most useful thing with zero configuration.\" In this case, if your project does not contain a `GitVersion.yml`, the component creates the one that an individual unfamiliar with GitVersion might find to be the most useful starting point.\n\n### 4. [Amazon CodeGuru Secure SAST Scanner](https://gitlab.com/explore/catalog/guided-explorations/ci-components/aws/amazon-codeguru-secure-sast)\nThis component is a security scanner and, as such, follows some security scanning best practices I have implemented during recent years. For instance, if it detects that you are licensed for GitLab Ultimate, it has the scanner output GitLab's SAST JSON format, which integrates the findings just like native GitLab scanner findings. The findings appear in MRs and dashboards and can be the target of security policy merge approvals. 
If, however, you are not licensed for GitLab Ultimate, the scanner outputs JUNIT XML so that you have some basic, non-diffed findings visualization in the pipeline \"Test Results\" tab. It also only activates if there are file types it can scan and disables if the GitLab SAST_DISABLED property is turned on.\n\n### 5. [Checkov IaC SAST](https://gitlab.com/explore/catalog/guided-explorations/ci-components/checkov-iac-sast)\nCheckov IaC SAST is another security scanner component that also follows the above security scanner principles, but specifically for the file types it is capable of scanning. A critical best practice of many of these components is pegging container tags for stability - but doing so through a \"component input\" with a default value. This allows component users to test with and peg to a newer or older version than you last tested with. So your shared dependency then offers stability, but with flexibility.\n\n### 6. [Super-Linter](https://gitlab.com/guided-explorations/ci-components/super-linter)\nSuper-Linter is a community-driven conglomeration of many linters for many languages. It originally started life as a GitHub Action, so this particular example demonstrates some of the ease of porting open source GitHub Actions to GitLab CI/CD components. A best practice aspect to many of my components is to always link to working example code with the component in action. This also allows you to do easy testing when performing updates.\n\n### 7. [Kaniko](https://gitlab.com/explore/catalog/guided-explorations/ci-components/kaniko)\nKaniko is a container that can build containers without Docker-in-Docker (DinD) privileged mode requirement. This component supports many OpenContainers labels and multi-arch builds.\n\n### 8. 
[CI Component Publishing Utilities](https://gitlab.com/explore/catalog/guided-explorations/ci-components/ci-component-pub)\nAs I built more components, I noticed that my \"component publishing CI code\" was being duplicated many times - and that makes it a candidate for becoming a component itself. All the other components here leverage this component. It also uses components itself, so it uses **GitVersion Ultimate Auto Semversioning** to get the next version.\n\nAnd if you're wondering, yes, CI Component Publishing Utilities publishes itself. In many of my components I have expanded the standard \"Inputs\" README section to \"Inputs and Configuration\" and I have added a column to show whether configurations are happening via inputs or variables. While you generally want to favor inputs, there are times when variables give more flexibility or you just want to document that the user can get perform key configurations of the underlying utilities via environment variables that the utility already supports. CI Component Publishing Utilities also uses the **Kaniko** CI component to build a container with the same version if it finds a Dockerfile at the root of your project (or you tell it where one is with a variable). This synchronizes the version of components and containers that support them. It also handles multi-arch container builds - see the documentation linked above to learn more!\n\n## Getting started with component templates\n\nThe Hello World components function as my own personal templates for starting a new component. They incorporate the CI Component Publishing Utilities and a reasonably good README.\n\nFor components that contain only CI code, I start by copying the source of [Hello World](https://gitlab.com/explore/catalog/guided-explorations/ci-components/hello-world) and for ones that require a container, I start with [Hello World Container](https://gitlab.com/explore/catalog/guided-explorations/ci-components/hello-world-container). 
I generally copy just the source into a new project so that I have a clean commit history.\n\nWhen I feel the component is stable and well developed I do a manual pipeline run and force the version to 1.1.0 or greater. The CI Component Publishing Utilities will then auto-increment the version from there.\n\n## CI component Builders Guides and practices\n\n[Darwins CI Component Builders Guide](https://gitlab.com/guided-explorations/ci-components/gitlab-profile) - I was also interested in publishing my approach to building components and what better way to get visibility than as a CI/CD component? BTW, the [GitLab Pipeline Authoring](https://about.gitlab.com/direction/verify/pipeline_composition/) team that created the CI/CD component architecture and [CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/) has some great best practices published at [CI components best practices](https://docs.gitlab.com/ee/ci/components/#best-practices). The practices I publish reference these ones, but I also have quite a few I follow that are specific to my own lessons learned.\n\n## Finding the CI/CD components and their sources\n\nThe [GitLab CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/) is still undergoing innovation in searchability. 
However, the description from the source project is free-form searchable, so by including standard text in the descriptions of all my component source projects, I have created the ability for users to [find all of the ones I've created in the catalog](https://gitlab.com/explore/catalog?search=Part+of+the+DarwinJS+Builder+Component+Library).\n\nTo make my component source findable regardless of its location on GitLab.com:\n- I add a repository topic to all the projects called [DarwinJS Component Builder Library](https://gitlab.com/explore/projects/topics/DarwinJS+Component+Builder+Libary).\n- I tag with the organic tag I found called [`GitLab CICD Components`](https://gitlab.com/explore/projects/topics/GitLab+CICD+Components).\n\nBoth of the above techniques can help you provide an index to your components and their source if you are inclined to do so.\n\nI hope that my CI/CD component building journey will be helpful to you now and in the future.\n\n> Learn more about the CI/CD Catalog and components:\n>  \n> - [CI/CD Catalog goes GA: No more building pipelines from scratch](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/)\n> \n> - [FAQ: GitLab CI/CD Catalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/)\n>\n> - [Documentation: CI/CD components and CI/CD Catalog](https://docs.gitlab.com/ee/ci/components/)\n> \n> - [Introducing CI/CD components and how to use them in GitLab](https://about.gitlab.com/blog/introducing-ci-components/)\n>","open-source",[109,721,9],"CI",{"slug":723,"featured":6,"template":701},"a-ci-component-builders-journey","content:en-us:blog:a-ci-component-builders-journey.yml","A Ci Component Builders 
Journey","en-us/blog/a-ci-component-builders-journey.yml","en-us/blog/a-ci-component-builders-journey",{"_path":729,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":730,"content":736,"config":745,"_id":747,"_type":14,"title":748,"_source":16,"_file":749,"_stem":750,"_extension":19},"/en-us/blog/a-story-of-runner-scaling",{"title":731,"description":732,"ogTitle":731,"ogDescription":732,"noIndex":6,"ogImage":733,"ogUrl":734,"ogSiteName":686,"ogType":687,"canonicalUrls":734,"schema":735},"An SA story about hyperscaling GitLab Runner workloads using Kubernetes","It is important to have the complete picture of scaled effects in view when designing automation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669897/Blog/Hero%20Images/kaleidico-26MJGnCM0Wc-unsplash.jpg","https://about.gitlab.com/blog/a-story-of-runner-scaling","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"An SA story about hyperscaling GitLab Runner workloads using Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"},{\"@type\":\"Person\",\"name\":\"Brian Wald\"}],\n        \"datePublished\": \"2022-06-29\",\n      }",{"title":731,"description":732,"authors":737,"heroImage":733,"date":739,"body":740,"category":741,"tags":742},[716,738],"Brian Wald","2022-06-29","\n\nThe following *fictional story*\u003Csup>1\u003C/sup> reflects a repeating pattern that Solutions Architects at GitLab encounter frequently. 
In the analysis of this story we intend to demonstrate three things: (a) Why one should be thoughtful in leveraging Kubernetes for scaling, (b) How unintended consequences of an approach to automation can create a net productivity loss for an organization (reversal of ROI) and (c) How solutions architecture perspectives can help find anti-patterns - retrospectively or when applied during a development process.\n\n### A DevOps transformation story snippet\n\nGild Investment Trust went through a DevOps transformational effort to build efficiency in their development process through automation with GitLab. Dakota, the application development director, knew that their current system handled about 80 pipelines with 600 total tasks and over 30,000 CI minutes so they knew that scaled CI was needed. Since development occurred primarily during European business hours, they were interested in reducing compute costs outside of peak work hours. Cloud compute was also a target due to acquring the pay per use model combined with elastic scaling.\n\nIngrid was the infrastructure engineer for developer productivity who was tasked with building out the shared GitLab Runner fleet to meet the needs of the development teams. At the beginning of the project she made a successful bid to leverage Kubernetes to scale CI and CD to take advantage of the elastic scaling and high availability all with the efficiency of containers. Ingrid had recently achieved the Certified Kubernetes Administrator (CKA) certification and she was eager to put her knowledge to practical use. She did some additional reading around applications running on Kubernetes and noted the strong emphasis on minimizing the resource profile of microservices to achieve efficiency in the form of compute density. She defined runner containers with 2GB of memory and 750millicores (about three quarters of a CPU) had good results from running some test CI pipelines. 
She also decided to leverage the Kubernetes Cluster Autoscaler which would use the overall cluster utilization and scheduling to automatically add and remove Kubernetes worker nodes for smooth elastic scaling in response to demand.\n\nAbout 3 months into the proof of concept implementation, Sasha, a developer team lead, noted that many of their new job types were failing with strange error messages. The same jobs ran fine on quickly provisioned GitLab shell runners. Since the primary difference between the environments was the liberal allocation of machine resources in a shell runner, Sasha reasoned that the failures were likely due to the constrained CPU and memory resources of the Kubernetes pods.\n\nTo test this hypothesis, Ingrid decided to add a new pod definition. She found it was difficult to discern which of the job types were failing due to CPU constraints, which ones due to memory constraints and which ones due to the combination of both. She knew it could be a lot of her time to discern the answer. She decided to simply define a pod that was more liberal on both CPU and memory and have it be selectable by runner tagging when more resources were needed for certain CI jobs. She created a GitLab Runner pod definition with 4GB of memory and 1750 millicores of CPU to cover the failing job types. Developers could then use these larger containers when the smaller ones failed by adding the ‘large-container’ tag to their GitLab job.\n\nSasha redid the CI testing and was delighted to find that the new resourcing made all the troubling jobs work fine. Sasha created a guide for developers to try to help discern when mysterious error messages and failed CI jobs were probably the fault of resourcing and then how to add a runner tag to the job to expand the resources.\n\nSome weeks later two of the key jobs that were fixed by the new container resourcing started intermittently failing on NPM package creation jobs for just 3 pipelines on 2 different teams. 
Of course Sasha tried to understand what the differences were and found that these particular pipelines were packaging notably large file sets because they were actually packaging testing data and the NPM format was a convenient way to provide testing data during automated QA testing.\n\nSasha brought this information to Ingrid and together they did testing to figure out that a 6GB container with 2500 millicores would be sufficient for creating an NPM package out of the current test dataset size. They also discussed whether the development team might want to use a dedicated test data management solution, but it turned out that the teams needs were very simple and that their familiarity with NPM packaging meant that bending NPM packaging to suit their purpose was actually more efficient than acquiring, deploying, learning and maintaining a special system for this purpose. So a new pod resourcing profile was defined and could be accessed with the runner tag ‘xlarge’.\n\nSasha updated the guide for finding the optimal container size through failure testing of CI jobs - but they were not happy with how large the document was getting and how imprecise the process was for determining when a CI job failure was, most likely due to container resource constraints. They were concerned that developers would not go through the process and instead simply pick the largest container resourcing profile in order to avoid the effort of optimizing and they shared this concern with Ingrid. In fact, Sasha noted, they were hard pressed themselves to follow their own guidelines and not to simply choose the largest container for all jobs themselves.\n\nThe potential for this cycle to repeat was halted several months later when Dakota, the app dev director, generated a report that showed a 2% increase in developer time spent optimizing CI jobs using failure testing for container size optimization. 
Dakota considered this work to be a net new increase because when the company was not using container-based CI, the developers did not have to manage this concern at all. Across 298 developers this amounted to around $840,000/yr dollars of total benefits per month\u003Csup>2\u003C/sup>. It was also thought to add about 2 hours (and growing) to developer onboarding training. It was noted that the report did not attempt to account for the opportunity cost tax - what would these people be doing to solve customer problems with that time? It also did not account for the \"critical moments tax\" (when complexity has an outsized frustration effect and business impact on high pressure, high risk situations).\n\n### Solution architecture retrospective: What went wrong?\n\nThis story reflects a classic antipattern we see at GitLab, not only with regard to Kubernetes runner optimization, but also across other areas, such as overly minimalized build containers and the potential for resultant pipeline complexity as was discussed in a previous blog called [When the pursuit of simplicity creates complexity in container-based CI pipelines](/blog/second-law-of-complexity-dynamics/). Frequently this result comes from inadvertent adherance to heuristics of a small part of the problem as though they were applicable to the entirety of the problem (a type of a logical “fallacy of composition”).\n\nThankfully the emergence of the anti-pattern follows a pattern itself :). 
Let’s apply a little retrospective solution architecture to the \"what happened\" in order to learn what might be done proactively next time to create better iterations on the next automation project.\n\nThere is a certain approach to landscaping shared greenspaces where, rather than shame people into compliance with signs about not cutting across the grass in key locations, the paths that humans naturally take are interpreted as the signal “there should be a path here.” Humans love beauty and detail in the environments they move through, but depending on the space, they can also value the efficiency of the shortest possible route slightly higher than aesthetics. A wise approach to landscaping holds these factors in a balance that reflects the efficiency versus aesthetic appeal balance of the space user. The space stays beautiful without any shaming required.\n\nIn our story Sasha and Ingrid had exactly this kind of cue where the developers were likely to walk across the grass. If that cue is taken to be a signal that reflects efficiency, we can quickly see what can be done to avoid the antipattern when it starts to occur.\n\nThe signal was the observation that developers might simply choose the largest container all the time to avoid the fussy process of optimizing the compute resources being consumed. Some would consider that laziness and not a good signal to heed. However, most human laziness is deeply rooted in efficiency trade-offs. The developers intuitively understand that their time fussing with failure testing to optimize job containers and their time diagnosing intermittent failures due to the varying content of those jobs, is not worth the amount of compute saved. 
That is especially true given the opportunity cost of not spending that time innovating the core software solution for the revenue generating application.\n\nIngrid and Sasha’s collaboration has initially missed the scaled human toil factor that was introduced to keep container resources at the minimum tolerable levels. They failed to factor in the escalating cost of scaled human toil to have a comprehensive efficiency measurement. They were following a microservices resourcing pattern which assumes the compute is purpose designed around minimal and well known workloads. When taken as a whole in a shared CI cluster, CI compute follows generalized compute patterns where the needs for CPU, Memory, Disk IO and Network IO can vary wildly from one moment to the next.\n\nIn the broadest analysis, the infrastructure team over indexed to the “team local” optimization of compute efficiency and unintentionally created a global de-optimization of scaled human toil for another team.\n\n## How can this antipattern be avoided?\n\nOne way to combat over indexing on a criteria is to have balancing objectives. This need is covered in \"Measure What Matters\" with the concept of counter balancing objectives. There are some counter balancing questions that can be asked of almost any automation effort. When solution architecture is functioning well these counter balancing questions are asked during the iterative process of building out a solution. Here are some applicable ones for this effort:\n\n**Approporiate Rules: Does the primary compute optimization heuristic match the characteristics of the actual compute workload being optimized?**\n\nThe main benefits of container compute for CI are dependency isolation, dependency encapsulation and a clean build environment for every job. None of these benefits has to do with the extreme resource optimizations available to engineer microservices architected applications. 
As a whole, CI compute reflects generalized compute, not the ultra-specialized compute of a 12 factor architected micro-service.\n\n**Appropriate granularity: Does optimization need to be applied at every level?**\n\nThe fact that the cluster itself has elastic scaling at the Kubernetes node level is a higher order optimization that will generate significant savings. Another possible optimization that would not require continuous fussing by developers is having a node group running on spot compute (as long as the spot compute runners self-identify their compute as spot so pipeline engineers can select appropriate jobs for spot). These optimizations can create huge savings, without creating scaled human toil.\n\n**People and processes counter check: Does the approach to optimization create scaled human toil by its intensity and/or frequency and/or lack of predictability for any people anywhere in the organization?**\n\nAutomation is all about moving human toil into the world of machines. While optimizing machine resources must always be a primary consideration, it is a lower priority objective than creating a net increase in human toil anywhere in your company. Machines can efficiently and elastically scale, while human workforces respond to scaling needs in months or even years.\n\n### Avoid scaled human toil\n\nNotice that neither the story, nor the qualifying questions, imply there is never a valid reason to have specialized runners that developers might need to select using tags. If a given attribute of runners could be selected once and with confidence then the antipattern would not be in play. One example would be selecting spot compute backed runners for workloads that can tolerate termination. 
It is the potential for repeated needed attention to calibrate container sizing - made worse by the possibility of intermittent failure based on job content - that pushes this specific scenario into the potential realm of “scaled human toil.” The ability to leverage elastic cluster autoscaling is also a huge help to managing compute resources more efficiently.\n\nIf the risk of scaled human toil could be removed then some of this approach may be able to be preserved. For example, having very large minimum pod resourcing and then a super-size for stuff that breaks the standard pod size just once. Caution is still warranted because it is still possible that developers have to fuss a lot to get a two pod approach working in practice.\n\n### Beware of scaled human toil of an individual\n\nOne thing the story did not highlight is that even if we were able to move all the fussing of such a design to the Infrastructure Engineer persona (perhaps by building an AI tuning mechanism that guesses at pod resourcing for a given job), the cumulative taxes on their role are frequently still not worth the expense. This is, in part, because they have a leveraged role - they help with all the automation of the scaled developer workforce and any time they spend on one activity can’t be spent on another. We humans are generally bad at accounting for opportunity costs - what else could that specific engineer be innovating on to make a stronger overall impact to the organization’s productivity or bottom line? Given the very tight IT labor market, a given function may not be able to add headcount, so opportunity costs take on an outsized importance.\n\n### Unlike people’s time, cloud compute does not carry opportunity cost\n\nA long time ago people had to schedule time on shared computing resources. If the time was used for low-value compute activities it could be taking away time from higher value activities. 
In this model compute time has an opportunity cost - the cost of what it could be using that time for if it wasn’t doing a lower value activity. Cloud compute has changed this because when compute is not being used, it is not being paid for. Additionally, elastic scaling eliminates the costs of over provisioning hardware and completely eliminates the administrative overhead of procuring capacity - if you need lots for a short period of time it is immediately available. In contrast, people time is not elastically scalable nor pay per use. This means that the opportunity cost question “What could this time be used for if it didn’t have to be spent on low value activities?” is still relevant for anything that creates activities for people.\n\n### The first corollary to the Second Law of Complexity Dynamics\n\nThe Second Law of Complexity Dynamics was introduced in an earlier blog. The essence is that complexity is never destroyed - it is only reformed - and primarily it is moved across a boundary line that dictates whether the management of the complexity is in our domain or externalized. For instance, if you write a function for md5 hashing in your code, you are managing the complexity of that code. If you install a dependency package that contains a premade md5 hash function that you simply use, then the complexity is externalized and managed for you by someone else.\n\nIn this story we are introducing the corollary to that “Law” that “**Exchanging Raw Machine Resources for Complexity Management is Generally a Reasonable Trade-off.**” In this case our scaled human toil is created due to the complexity of unending, daily management of optimizing compute efficiency. This does not mean that burning thousands of dollars of inefficient compute is OK because it saved someone 20 minutes of fussing. 
It is scoped in the following way:\n\n- scoped to “complexity management” (which is creating the “scaled human toil” in our story) - many minutes of toil that increases proportionally or compounds with more of the activity.\n- scoped to “raw machine resources” - meaning that there is not additional logistics nor human toil to gain the resources. In the cloud raw machine resources are generally available via configuration tweaks.\n- scoped to “generally reasonable” - this indicates a disposition of being very cautious about increasing human toil with an automatoin solution - but it still makes sense to use models or calculations to check if the rule actually holds in a given case.\n\nSo if we can externalize complexity management that is great (The Second Law of Complexity Dynamics). If we can trade complexity management for raw computing resource, that is likely still better than managing it ourselves (The First Corollary).\n\n### Iterating SA: Experimental improvements for your next project\n\nThis post contains specifics that can be used to avoid antipatterns in building out a Kubernetes cluster for GitLab CI. However, in the qualifying questions we’ve attempted to kick it up to one meta-level higher to help assess whether any automation effort may have an “overly local” optimization focus which can inadvertently create a net loss of efficiency across the more global “company context.” It is our opinion that automation efforts that create a net loss in human productivity should not be classified as automation at all. While it’s strong medicine to apply to one’s work, we feel that doing so causes appropriate innovation pressure to ensure that individual automation efforts truly deliver on their inherent promise of higher human productivity and efficiency. 
So simply ask “Does this way of solving a problem cause recurring work for anyone?”\n\n### DevOps transformation and solution architecture perspectives\n\nA technology architecture focus rightfully hones in on the technology choices for a solution build. However, if it is the only lens, it can result in scenarios like our story. Solutions architecture steps back to a broader perspective to sanity-check that solution iterations account for a more complete picture of both the positive and negative impacts across all three of people, processes and technology. As an organizational competency, DevOps emphasis solution architecture perspectives when it is defined as a collaborative and cultural approach to people, processes and technology.\n\nFootnotes:\n\n1. This fictional story was devised specifically for this article and does not knowingly reflect the details of any other published story or an actual situation. The names used in the story are from [GitLab’s list of personas](https://handbook.gitlab.com/handbook/product/personas/).\n2. Across a team of 300 full time developers. 
9.6min/workday x 250 workdays / year = 2400mins / 8hrs/workday  = 5 workdays x $560 per day (140K Total Comp/250days) = $2800/dev/year x 300 developers = $840,000/yr\n\nCover image by [Kaleidico](https://unsplash.com/@kaleidico?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)\n","engineering",[721,9,743,744],"performance","solutions architecture",{"slug":746,"featured":6,"template":701},"a-story-of-runner-scaling","content:en-us:blog:a-story-of-runner-scaling.yml","A Story Of Runner Scaling","en-us/blog/a-story-of-runner-scaling.yml","en-us/blog/a-story-of-runner-scaling",{"_path":752,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":753,"content":759,"config":767,"_id":769,"_type":14,"title":770,"_source":16,"_file":771,"_stem":772,"_extension":19},"/en-us/blog/a-visual-guide-to-gitlab-ci-caching",{"title":754,"description":755,"ogTitle":754,"ogDescription":755,"noIndex":6,"ogImage":756,"ogUrl":757,"ogSiteName":686,"ogType":687,"canonicalUrls":757,"schema":758},"A visual guide to GitLab CI/CD caching","Learn cache types, as well as when and how to use them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682443/Blog/Hero%20Images/cover.jpg","https://about.gitlab.com/blog/a-visual-guide-to-gitlab-ci-caching","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A visual guide to GitLab CI/CD caching\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matthieu Fronton\"}],\n        \"datePublished\": \"2022-09-12\",\n      }",{"title":754,"description":755,"authors":760,"heroImage":756,"date":762,"body":763,"category":741,"tags":764},[761],"Matthieu Fronton","2022-09-12","If you've ever worked with GitLab CI/CD you may have needed, at some point,\nto use a cache to share content between jobs. 
The decentralized nature of\nGitLab CI/CD is a strength that can confuse the understanding of even the\nbest of us when we want to connect wires all together. For instance, we need\nto know critical information such as the difference between artifacts and\ncache and where/how to place setups.\n\n\nThis visual guide will help with both challenges.\n\n\n## Cache vs. artifacts\n\n\nThe concepts _may_ seem to overlap because they are about sharing content\nbetween jobs, but they actually are fundamentally different:\n\n\n- If your job does not rely on the the previous one (i.e. can produce it by\nitself but if content already exists the job will run faster), then use\ncache.\n\n- If your job does rely on the output of the previous one (i.e. cannot\nproduce it by itself), then use artifacts and dependencies.\n\n\nHere is a simple sentence to remember if you struggle between choosing cache\nor artifact:\n\n> Cache is here to speed up your job but it may not exist, so don't rely on\nit.\n\n\nThis article will focus on **cache**.\n\n\n## Initial setup\n\n\nWe'll go with a simple representation of the GitLab CI/CD pipelining model\nand ignore (for now) that the jobs can be executed on any runners and hosts.\nIt will help get the basics.\n\n\nLet's say you have:\n\n- 1 project with 3 branches\n\n- 1 host running 2 docker runners\n\n\n![Initial\nsetup](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-1.png){:\n.shadow.center}\n\n\n## Local cache: Docker volume\n\n\nIf you want a [local\ncache](https://docs.gitlab.com/ee/ci/caching/index.html#where-the-caches-are-stored)\nbetween all your jobs running on the same runner, use the [cache\nstatement](https://docs.gitlab.com/ee/ci/yaml/#cache) in your\n`.gitlab-ci.yml`:\n\n\n```yaml\n\ndefault:\n  cache:\n    path:\n      - relative/path/to/folder/*.ext\n      - relative/path/to/another_folder/\n      - relative/path/to/file\n```\n\n\n![local / container / all branches / 
all\njobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-2.png){:\n.shadow.center}\n\n\nUsing the [predefined\nvariable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html)\n`CI_COMMIT_REF_NAME` as the [cache\nkey](https://docs.gitlab.com/ee/ci/yaml/index.html#cachekey), you can ensure\nthe cache is tied to a specific branch:\n\n\n```yaml\n\ndefault:\n  cache:\n    key: $CI_COMMIT_REF_NAME\n    path:\n      - relative/path/to/folder/*.ext\n      - relative/path/to/another_folder/\n      - relative/path/to/file\n```\n\n\n![local / container / one branch / all\njobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-3.png){:\n.shadow.center}\n\n\nUsing the [predefined\nvariable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html)\n`CI_JOB_NAME` as the [cache\nkey](https://docs.gitlab.com/ee/ci/yaml/index.html#cachekey), you can ensure\nthe cache is tied to a specific job:\n\n\n![local / container / all branch / one\njobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-4.png){:\n.shadow.center}\n\n\n## Local cache: Bind mount\n\n\nIf you don't want to use a volume for caching purposes (debugging purpose,\ncleanup disk space more easily, etc.), you can configure a [bind mount for\nDocker volumes](https://docs.docker.com/storage/bind-mounts/) while\nregistering the runner. 
With this setup, you do not need to set up the\n[cache statement](https://docs.gitlab.com/ee/ci/yaml/#cache) in your\n`.gitlab-ci.yml`:\n\n\n```yaml\n\n#!/bin/bash\n\n\ngitlab-runner register                             \\\n  --name=\"Bind-Mount Runner\"                       \\\n  --docker-volumes=\"/host/path:/container/path:rw\" \\\n...\n\n```\n\n\n![local / one runners / one host / all branch / all\njobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-5.png){:\n.shadow.center}\n\n\nIn fact, this setup even allows you to share a cache between jobs running on\nthe same host without requiring you to set up a distributed cache (which\nwe'll talk about later):\n\n\n```yaml\n\n#!/bin/bash\n\n\ngitlab-runner register                             \\\n  --name=\"Bind-Mount Runner X\"                     \\\n  --docker-volumes=\"/host/path:/container/path:rw\" \\\n...\n\n\ngitlab-runner register                                 \\\n  --name=\"Bind-Mount Runner Y\"                         \\\n  --docker-volumes=\"/host/path:/container/alt/path:rw\" \\\n...\n\n```\n\n\n![local / multiple runners / one host / all branch / all\njobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-6.png){:\n.shadow.center}\n\n\n## Distributed cache\n\n\nIf you want to have a [shared\ncache](https://docs.gitlab.com/runner/configuration/autoscale.html#distributed-runners-caching)\nbetween all your jobs running on multiple runners and hosts, use the \u003Ca\nhref=\"https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runnerscache-section\">[runner.cache]\u003C/a>\nsection in your `config.toml`:\n\n\n```yaml\n\n[[runners]]\n  name = \"Distributed-Cache Runner\"\n...\n  [runners.cache]\n    Type = \"s3\"\n    Path = \"bucket/path/prefix\"\n    Shared = true\n    [runners.cache.s3]\n      ServerAddress = \"s3.amazonaws.com\"\n      AccessKey = \"\u003Cchangeme>\"\n      SecretKey = \"\u003Cchangeme>\"\n      BucketName = \"foobar\"\n    
  BucketLocation = \"us-east-1\"\n```\n\n\n![remote / multiple runners / multiple hosts / all branch / all\njobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-7.png){:\n.shadow.center}\n\n\nUsing the predefined variable `CI_COMMIT_REF_NAME` as the cache key you can\nensure the cache is tied to a specific branch between multiple runners and\nhosts:\n\n\n![remote / multiple runners / multiple hosts / one branch / all\njobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-8.png){:\n.shadow.center}\n\n\n## Real-life setup\n\n\nThe above assumptions allowed you to harness your understanding of the\nconcepts and possibilities.\n\n\nIn real life, you'll face more complex wiring and we hope this article will\nhelp you as a visual cheatsheet along with the reference documentation.\n\n\nJust to give you a sneak peek, here is an exercise for you:\n\n\n- Set up a cache between all the jobs of a specific stage, running on any\nrunner and any hosts, but only between pipeline of the same branches:\n\n\n![Real-life test\nassignment](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-9.png){:\n.shadow.center}\n\n\nHappy caching, folks!\n\n\n\n\nCover image by [Alina Grubnyak](https://unsplash.com/@alinnnaaaa) on\n[Unsplash](https://unsplash.com)\n\n{: .note}\n",[721,9,765,766],"DevOps","tutorial",{"slug":768,"featured":6,"template":701},"a-visual-guide-to-gitlab-ci-caching","content:en-us:blog:a-visual-guide-to-gitlab-ci-caching.yml","A Visual Guide To Gitlab Ci 
Caching","en-us/blog/a-visual-guide-to-gitlab-ci-caching.yml","en-us/blog/a-visual-guide-to-gitlab-ci-caching",{"_path":774,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":775,"content":781,"config":790,"_id":792,"_type":14,"title":793,"_source":16,"_file":794,"_stem":795,"_extension":19},"/en-us/blog/actioning-security-vulnerabilities-in-gitlab-premium",{"title":776,"description":777,"ogTitle":776,"ogDescription":777,"noIndex":6,"ogImage":778,"ogUrl":779,"ogSiteName":686,"ogType":687,"canonicalUrls":779,"schema":780},"How to action security vulnerabilities in GitLab Premium","Learn step-by-step how to process detected vulnerabilities and spawn merge request approval rules from critical vulnerabilities.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099637/Blog/Hero%20Images/Blog/Hero%20Images/security-pipelines_security-pipelines.jpg_1750099637178.jpg","https://about.gitlab.com/blog/actioning-security-vulnerabilities-in-gitlab-premium","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to action security vulnerabilities in GitLab Premium\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sam Morris\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2023-03-13\",\n      }",{"title":776,"description":777,"authors":782,"heroImage":778,"date":785,"body":786,"category":787,"tags":788},[783,784],"Sam Morris","Noah Ing","2023-03-13","GitLab Premium features several security scanners you can leverage to detect\nvulnerabilities. However, when you incorporate the scanners into your\nproject pipelines and the scanning job succeeds, you'll want feedback on\nwhether you are introducing vulnerabilities into the codebase. 
This tutorial\nprovides a mechanism to require a merge request approval if a scanner\navailable on GitLab Premium finds a critical vulnerability.\n\n\n*While this tutorial shows how to add some process around actioning\nvulnerabilities, we have more robust, governed, and user-friendly\nfunctionality available in GitLab Ultimate called a [Scan Result\nPolicy](https://docs.gitlab.com/ee/user/application_security/policies/scan-result-policies.html).\nThe solution outlined here does not seek to replace that functionality, but\nrather augment the scan results available in GitLab Premium. If you are an\nUltimate user or if you want to compare the two experiences, then you should\ncheck out [this video\nintroduction](https://www.youtube.com/watch?v=w5I9gcUgr9U&ab_channel=GitLabUnfiltered)\ninstead.*\n\n\nLearn how to do the following:\n\n\n1. Set up a .gitlab-ci.yml\n\n2. Add in a vulnerability processing script\n\n3. Require approval if vulnerabilities are found \n\n\n### Prerequisites\n\n\n- A project with GitLab Premium\n\n- A gitlab-ci.yml\n\n- A project access token\n\n- Basic knowledge of Python\n\n- 5 minutes (or less)\n\n\n## Setup the gitlab-ci.yml \n\n\nThis is how the GitLab CI pipeline of our test project looks visually. 
Below\nwe will break down the individual stages.\n\n\nAdd the following to your .gitlab-ci.yml:\n\n\n```yaml\n\nsecret_detection:\n  artifacts:\n    paths:\n      - gl-secret-detection-report.json\n\nprocess_secret_detection:\n   image: python:3.7-alpine3.9\n   stage: process_vulns\n   needs:\n    - job: secret_detection\n      artifacts: true\n   before_script:\n      pip install python-gitlab\n   script:\n     - python3 process_vulns.py gl-secret-detection-report.json $PROJECT_ACCESS_TOKEN $CI_PROJECT_ID $CI_COMMIT_SHA\n```\n\n\nA breakdown of what is going on above:\n\n- gl-secret-detection-report.json needs to be overriden so it’s being stored\nas an artifact in the secret_detection job.\n\n- The process_secret_detection job is dependent on secret_detection's\nartifact so we have added a needs keyword requiring successful completion of\nthe secret_detection job.\n\n- pip installs the python-gitlab dependency so that the process_vulns.py can\nleverage GitLab API calls.\n\n- The process_vulns.py is taking in four arguments:\n   - gl-secret-detection-report.json is the JSON report produced from the secret_detection scanner. If you would like to take in another report this will need to be modified.\n   - $PROJECT_ACCESS_TOKEN needs to be added; review the instructions on creating a project access token in the next step.\n   - $CI_PROJECT_ID and $CI_COMMIT_SHA are both GitLab CI environment variables that will automatically be inferred.\n\n### Create a project access token\n\n\nTo create a project access token:\n\n1. On the top bar, select Main menu > Projects and find your project.\n\n2. On the left sidebar, select Settings > Access Tokens.\n\n3. Enter a name. The token name is visible to any user with permissions to\nview the project.\n\n4. Optional. Enter an expiry date for the token. The token expires on that\ndate at midnight UTC. An instance-wide maximum lifetime setting can limit\nthe maximum allowable lifetime in self-managed instances.\n\n5. 
Select a role for the token.\n\n6. Select the desired scopes.\n\n7. Select Create project access token.\n\n8. Add this newly created project access token to your CI/CD variables in\nyour project settings!\n\n\n## Add in the vulnerability processing script\n\n\n[The process_vulns.py script can be found\nhere.](https://gitlab.com/gl-demo-premium-smorris/secure-premium-app/-/blob/main/process_vulns.py)\nCopy that file into your project.\n\n\nThe goal of this script is to require approval from an author (or group of\nauthors) if a critical vulnerability is found.\n\n\n**Note:** You will need to [change the user ID in the\nprocess_vulns.py](https://gitlab.com/gl-demo-premium-smorris/secure-premium-app/-/blame/main/process_vulns.py#L40)\nto match the user ID of your designated Approver at your organization.\n\n\nThe following is a breakdown of what the script is doing:\n\n\n- JSON security reports are loaded in, if there are any vulnerabilities they are\nparsed.\n\n- An authentication with GitLab is run using the project access token to\ninteract with the project.\n\n- If vulnerabilities are not found, then it will print to the GitLab CI\nLogs: “No vulnerabilities are found.”\n\n- If a critical vulnerability is found, then it will require an approval.\n\n\nRun the pipeline and voila! 
Your pipeline now requires approvers if a\ncritical vulnerability is found!\n\n\n### Demo\n\n\nWatch a video demonstration of how to action security vulnerabilities in\nGitLab Premium, presented by Sam Morris:\n\n\n\u003Ciframe width=\"560\" height=\"315\"\nsrc=\"https://www.youtube.com/embed/Cld36OZrLFo\" title=\"YouTube video player\"\nframeborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write;\nencrypted-media; gyroscope; picture-in-picture; web-share\"\nallowfullscreen>\u003C/iframe>\n\n\n#### Caveats\n\n- This is mimicking a Scan Result Policy; it is not a replacement.\n\n- This currently only requires approval for a critical vulnerability, and\neach new rule would have to be added to the script.\n\n- This script lives within the same location as your project, so there is no\nrestriction on who can modify the script, breaking separation of duties at\nscale.\n\n- Approval rules are not removed once the vulnerability is fixed.\n\n- Approvers' IDs need to be hardcoded and maintained in the script file.\n\n- Since there is no vulnerability record generated, you cannot track the\nvulnerabilities over time in your application.\n\n- Vulnerabilities are not fed into a report or security dashboard, so this\nonly reports merge request vulnerabilities.\n\n\n## References\n\n- [Create a project access\ntoken](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html#create-a-project-access-token)\n\n- [Setting up CI/CD\nvariables](https://docs.gitlab.com/ee/ci/variables/#define-a-cicd-variable-in-the-ui)\n\n- [Secure Premium app\nproject](https://gitlab.com/gl-demo-premium-smorris/secure-premium-app/-/blob/main/process_vulns.py)\n\n\n## Related posts\n\n- [GitLab's commitment to enhanced application security in the modern DevOps\nworld](/blog/security-gitlab-15/)\n\n- [How to become more productive with Gitlab\nCI](/blog/how-to-become-more-productive-with-gitlab-ci/)\n\n- [GitLab CI 
DRY\nDevelopment](/blog/keeping-your-development-dry/)\n\n\n_Cover image by [Christopher\nBurns](https://unsplash.com/@christopher__burns?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non [Unsplash](https://www.unsplash.com)._\n","security",[787,789,721,9,766],"DevSecOps",{"slug":791,"featured":6,"template":701},"actioning-security-vulnerabilities-in-gitlab-premium","content:en-us:blog:actioning-security-vulnerabilities-in-gitlab-premium.yml","Actioning Security Vulnerabilities In Gitlab Premium","en-us/blog/actioning-security-vulnerabilities-in-gitlab-premium.yml","en-us/blog/actioning-security-vulnerabilities-in-gitlab-premium",{"_path":797,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":798,"content":804,"config":812,"_id":814,"_type":14,"title":815,"_source":16,"_file":816,"_stem":817,"_extension":19},"/en-us/blog/amazon-linux-2-support-and-distro-specific-packages",{"title":799,"description":800,"ogTitle":799,"ogDescription":800,"noIndex":6,"ogImage":801,"ogUrl":802,"ogSiteName":686,"ogType":687,"canonicalUrls":802,"schema":803},"Amazon Linux 2 support and distro-specific packages for GitLab","Learn how to do early testing as well as how to peg your automation to the EL 7 packages until you are able to properly integrate the changes into your automation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682299/Blog/Hero%20Images/gitlab-blog-banner.png","https://about.gitlab.com/blog/amazon-linux-2-support-and-distro-specific-packages","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Amazon Linux 2 support and distro-specific packages for GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-05-02\",\n      }",{"title":799,"description":800,"authors":805,"heroImage":801,"date":806,"body":807,"category":808,"tags":809},[716],"2022-05-02","GitLab’s Distribution Engineering 
team has been hard at work getting Amazon\nLinux 2 distro-specific packages ready in preparation for GitLab’s official\nsupport of Amazon Linux 2. Starting with Version 15.0 of GitLab, Amazon\nLinux 2 is a supported distro and packages are available for both x86 and\nGraviton ARM architectures.\n\n\n## What is Amazon Linux 2?\n\n\nAmazon Linux 2 is the next-generation Amazon Linux operating system that\nprovides a modern application environment with the most recent enhancements\nfrom the Linux community alongside long-term support. Amazon Linux 2 is\naccessible as a virtual machine image for on-premises development and\ntesting. This lets you easily develop, test, and certify your applications\nright from your local development environment. \n\n\nAccording to the AWS FAQ page for Amazon Linux 2, the primary elements of\nthis latest version of the operating system include:\n\n\n1. A Linux kernel tuned for performance on Amazon EC2.\n\n\n2. A set of core packages including systemd, GCC 7.3, Glibc 2.26, Binutils\n2.29.1 that receive Long Term Support (LTS) from\n[AWS](/blog/deploy-aws/).\n\n\n3. An extras channel for rapidly evolving technologies that are likely to be\nupdated frequently and outside the Long Term Support (LTS) model.\n\n\nAmazon Linux 2 has a support lifespan through June 20, 2024, to allow enough\ntime for users to migrate to Amazon Linux 2022.\n\n\n\n## Safely moving forward to Amazon Linux 2 packages from EL7\n\n\nWhile Amazon Linux 2 has not been officially supported before 15.0, as a\nconvenience to customers who wanted to use yum and RPM packages to install\nthe EL7 packages, GitLab configured a workaround in our packaging services\nto direct Amazon Linux 2 yum requests to the EL7 packages. 
If you’ve been\nusing GitLab’s yum repo registration script, you may not know that you were\nusing EL7 packages and not distro-specific packages.\n\n\nThis workaround will be deprecated and requests from Amazon Linux 2 will get\nthe distro-specific packages with the release of GitLab 15.3.0 on August 22,\n2022.\n\n\nAs a convenience for those of you who have automation that depends directly\non this workaround, we wanted to provide you with information on how to do\nearly testing as well as how to peg your automation to the EL 7 packages\nuntil you are able to properly integrate the changes into your automation.\n\n\nGitLab documentation demonstrates how to call our managed yum repository\nsetup scripts by downloading the latest copy and running it directly in [the\ninstructions for installing\ninstances](https://about.gitlab.com/install/#centos-7) and [the instructions\nfor installing\nrunners](https://docs.gitlab.com/runner/install/linux-repository.html).\n\n\nAny organization using GitLab’s EL 7 packages for Amazon Linux 2 will want\nto test with - and update to - the distro-specific packages as soon as\npossible as GitLab will only be testing Amazon Linux 2 with the Amazon Linux\n2 specific packages going forward.\n\n\nWe also understand that the timing of the testing and migration to these\npackages must be done in a coordinated cutover so that the package type does\nnot change in your existing stacks without you having made any changes. This\ncan be more important if a GitLab stack has undergone platform qualification\nfor compliance purposes.\n\n\nAmazon Linux 2 specific packages are only available for GitLab 14.9.0 and\nlater. If your automation depends directly on GitLab’s repo configuration\nscript and it is still pegged to a GitLab version prior to 14.9.0 when this\nchange becomes GA, then action must be taken to prevent breaking that\nautomation. 
We have devised an idempotent two-line script solution that you\ncan put in place now to prevent disruption if you are still on a pre-14.9.0\nversion at the time the new behavior of `script.rpm.sh` becomes GA on August\n22, 2022 with the release of GitLab 15.3.0.\n\n\nGitLab rake-based backup and restore will continue to work seamlessly across\nthe distro-specific package changes if you have to restore to your Amazon\nLinux 2 built stack from an EL7 backup. If you are using third-party backup,\nyou may wish to trigger a new backup immediately after transitioning to the\nnew distro packages to avoid the scenario altogether.\n\n\n## Amazon Linux 2 packages for building GitLab instances before 15.3.0\n\n\nThe following code inserts two lines of code between those originally\noutlined in [the instructions for installing using RPM\npackages](/install/#centos-7). The first one (starts with `sed`) splices in\nthe Amazon Linux 2 yum repo endpoint edits into the repository configuration\nfile created by script.rpm.sh. The second one (starts with `if yum`) cleans\nthe yum cache if the package was already installed so that the new location\nwill be used.\n\n\n> Sudo note: If you are using these commands interactively under the default\nSSH or SSM session manager user, then using `sudo su` before running this\ncode is necessary. If you are using these commands in Infrastructure as Code\n(e.g. 
CloudFormation userdata scripts), then sudo may cause ‘command not\nfound’ errors when the user running automation is already root equivalent.\nBe mindful about using interactively tested commands directly in your\nautomation.\n\n\n```bash\n\n#Existing packaging script from https://about.gitlab.com/install/#centos-7\n\ncurl\nhttps://packages.gitlab.com/install/repositories/gitlab/gitlab-ee/script.rpm.sh\n| sudo bash\n\n\n#Patch to preview and/or peg Amazon Linux 2 specific packages\n\nsed -i \"s/\\/el\\/7/\\/amazon\\/2/g\" /etc/yum.repos.d/gitlab_gitlab*.repo\n\n\n#Reset the cache if the package was previously installed (not needed for\ninstalls onto a clean machine)\n\nif yum list installed gitlab-ee; then yum clean all ; yum makecache; fi\n\n\n#Existing install command (remove \"-y\" to validate package and arch mapping\nbefore install)\n\nyum install gitlab-ee -y\n\n```\n\n\n> Notice in this output that the **Version** ends in `.amazon2`. In this\ncase the **Arch** is `aarch64` - indicating 64-bit Graviton ARM.\n\n\n![Resolved GitLab\nDependencies](https://about.gitlab.com/images/blogimages/2022-04-amazon-linux-2/gl-instance-dependencies-resolved.png)\n\n\n### Moving to Amazon Linux 2 packages early for a seamless post-GA\ntransition\n\n\nWhen the script.rpm.sh script is cut over to always point Amazon Linux 2 to\nthe new distro-specific packages, the sed command will no longer be\nnecessary. However, sed is also idempotent and will not make edits if the\nsearch text is not found. 
This means you can use the sed command to switch\nover early, but not have to worry about a breaking change when the\n`script.rpm.sh` is updated.\n\n\n### Pegging EL 7 and/or a GitLab version prior to 14.9.0 for a seamless\npost-GA transition\n\n\nIf your automation is pegged to an earlier version of GitLab, you will need\nto keep using EL7 packages, and, in fact, after the cutover you would need\nto implement the opposite command (which is also idempotent to be\nimplemented now).\n\n\n```bash\n\n#Patch to peg GitLab Version to EL 7 Packages (only does something after GA\nof gitlab repo script)\n\nsed -i \"s/\\/amazon\\/2/\\/el\\/7/g\" /etc/yum.repos.d/gitlab_gitlab*.repo\n\n\n#Reset the cache if the package was previously installed (not needed for\ninstalls onto a clean machine)\n\nif yum list installed gitlab-ee; then yum clean all ; yum makecache; fi\n\n```\n\n\nJust like the sed command for taking distro-specific packages early, this\ncommand can be implemented immediately with no bad effects - which will\nseamlessly keeping your automation pegged to the EL 7 packages when\n`script.rpm.sh` is updated.\n\n\n## Amazon Linux 2 package for building GitLab Runners before 15.3.0\n\n\nThe following code inserts two lines of code between those originally\n[outlined in the\ninstructions](https://docs.gitlab.com/runner/install/linux-repository.html).\nThe first one (starts with `sed`) splices in the Amazon Linux 2 yum repo\nendpoint edits into the repository configuration file created by\nscript.rpm.sh. The second one (starts with `if yum`) cleans the yum cache if\nthe package was already installed so that the new location will be used.\n\n\n> Sudo note: If you are using these commands interactively under the default\nSSH or SSM session manager user, then using `sudo su` before running this\ncode is necessary. If you are using these commands in Infrastructure as Code\n(e.g. 
CloudFormation userdata scripts), then sudo may cause ‘command not\nfound’ errors when the user running automation is already root equivalent.\nBe mindful about using interactively tested commands directly in your\nautomation.\n\n\n```bash\n\n#Existing packaging script from\nhttps://docs.gitlab.com/runner/install/linux-repository.html\n\ncurl -L\n\"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.rpm.sh\"\n| sudo bash\n\n\n#Patch to test or peg Amazon Linux 2 specific packages\n\nsed -i \"s/\\/el\\/7/\\/amazon\\/2/g\" /etc/yum.repos.d/runner_gitlab*.repo\n\n\n#Reset the cache if the package was previously installed (not needed for\ninstalls onto a clean machine)\n\nif yum list installed gitlab-runner; then yum clean all ; yum makecache; fi\n\n\n#Existing install command (remove \"-y\" to validate package and arch mapping\nbefore install)\n\nyum install gitlab-runner -y\n\n```\n\n\n> Notice in this output that **Version** is not distro-specific. In this\ncase the **Arch** is `aarch64` - indicating 64-bit Graviton ARM.\n\n\n![Resolved GitLab Runner\nDependencies](https://about.gitlab.com/images/blogimages/2022-04-amazon-linux-2/gl-runner-dependencies-resolved.png)\n\n\n## Pegging to EL 7 and/or a GitLab Runner version prior to 14.9.1 for a\nseamless post-GA transition\n\n\nThe underlying package for EL 7 and Amazon Linux 2 is literally a copy of\nthe same package. 
However, the Amazon Linux 2 endpoint for Runner RPM\npackages have only been uploaded from GitLab Runner 14.9.1 and later, so if\nyou have runners that need to be on an earlier version, you would need to\nstay pointed at EL 7 for those packages to continue to resolve as available.\nThe following code shows how to do that for GitLab Runner.\n\n\n```bash\n\n#Patch to peg GitLab Version to EL 7 Packages (only does something after GA\nof gitlab repo script)\n\nsed -i \"s/\\/amazon\\/2/\\/el\\/7/g\" /etc/yum.repos.d/runner_gitlab*.repo\n\n\n#Reset the cache if the package was previously installed (not needed for\ninstalls onto a clean machine)\n\nif yum list installed gitlab-runner; then yum clean all ; yum makecache; fi\n\n```\n\n\n## Need-to-know takeaways\n\n\n- Amazon Linux 2 is a supported distro for GitLab instances and runner as of\nthe release of version 15.0 on May 22, 2022.\n\n- Amazon Linux 2 packages are available for x86 and ARM for GitLab Version\n14.9.0 and higher. (Prior to 14.9.0 the EL7 packages must be used as they\nhave a long version history).\n\n- This is the first availability of ARM RPM packages of GitLab for Amazon\nLinux 2.\n\n- In 15.3 (August 22, 2022), the script.rpm.sh will automatically start\ndirecting to the Amazon Linux 2 packages where it had previously directed\nAmazon Linux 2 yum requests to the EL7 packages.\n\n- It is common to have taken a dependency directly on the latest version of\nthis GitLab script in other automation.\n\n- Before the GA cutover date of August 22, 2022 (15.3.0 GitLab Release), for\nthese scripts, you have the opportunity to pre-test these packages and\ndetermine whether they create any issues with your automation or GitLab\nconfiguration.\n\n- You can also peg to the Amazon Linux 2 packages early or peg to the EL7\npackages in advance if you find problems that you need more time to resolve.\nBoth of these pegging types are idempotent, meaning the code changes do not\ndo anything that causes problems after the 
change over happens.\n\n- Existing Amazon Linux 2 installations that were installed using the EL7\npackages can use a regular yum upgrade command to start using the new Amazon\nLinux 2 packages. This operation may also be an upgrade of the product\nversion at the same time. For existing installations you will need to patch\nthe yum repo files as explained in this article in order to upgrade directly\nto Amazon Linux 2 from EL7 using packages. \n\n\n> **Note**\n\n> This blog post and linked pages contain information related to upcoming\nproducts, features, and functionality. It is important to note that the\ninformation presented is for informational purposes only. Please do not rely\non this information for purchasing or planning purposes. As with all\nprojects, the items mentioned in this blog post and linked pages are subject\nto change or delay. The development, release, and timing of any products,\nfeatures, or functionality remain at the sole discretion of GitLab Inc.\n\n\n![AWS Partner\nLogo](https://about.gitlab.com/images/blogimages/2022-04-amazon-linux-2/awsgravitonready.png){:\n.right}\n","news",[810,721,9,766,811],"releases","AWS",{"slug":813,"featured":6,"template":701},"amazon-linux-2-support-and-distro-specific-packages","content:en-us:blog:amazon-linux-2-support-and-distro-specific-packages.yml","Amazon Linux 2 Support And Distro Specific Packages","en-us/blog/amazon-linux-2-support-and-distro-specific-packages.yml","en-us/blog/amazon-linux-2-support-and-distro-specific-packages",{"_path":819,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":820,"content":826,"config":832,"_id":834,"_type":14,"title":835,"_source":16,"_file":836,"_stem":837,"_extension":19},"/en-us/blog/automating-a-twitter-bot-using-gitlab-cicd",{"title":821,"description":822,"ogTitle":821,"ogDescription":822,"noIndex":6,"ogImage":823,"ogUrl":824,"ogSiteName":686,"ogType":687,"canonicalUrls":824,"schema":825},"How to automate a Twitter bot using GitLab CI/CD","This tutorial shows 
how to use the DevSecOps platform to create a set-and-forget Twitter bot.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749661856/Blog/Hero%20Images/ci-cd-demo.jpg","https://about.gitlab.com/blog/automating-a-twitter-bot-using-gitlab-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automate a Twitter bot using GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Siddharth Mathur\"}],\n        \"datePublished\": \"2023-03-21\",\n      }",{"title":821,"description":822,"authors":827,"heroImage":823,"date":829,"body":830,"category":741,"tags":831},[828],"Siddharth Mathur","2023-03-21","\n\nGitLab's CI/CD pipelines are great for automating many things, like deployments to Google Kubernetes Engine and security scans. But did you know that you could use GitLab CI/CD pipelines to run a set-and-forget Twitter bot?\n\nMany organizations today are leveraging Twitter's API to [understand customer sentiment](https://developer.twitter.com/en/blog/success-stories/target), [track public health data](https://developer.twitter.com/en/blog/success-stories/penn), [perform financial analysis](https://developer.twitter.com/en/blog/success-stories/likefolio), and more. While these bots may be running on self-managed infrastrucuture or external services, you can simplify and consolidate your tooling by leveraging GitLab instead, making your bot easier to manage.\n\nWith GitLab's [Free tier](/pricing/), you can leverage 400 minutes of CI/CD run time per month to automatically analyze and post tweets. With GitLab [Premium](/pricing/premium) and [Ultimate](/pricing/ultimate), you'll get even more pipeline minutes to tweet more, run longer natural language processing analyses, or for other projects.\n\nSetting up a Twitter bot using GitLab is pretty simple. 
At the end of this blog, you'll have a project that looks like [this](https://gitlab.com/smathur/twitter-bot), and a Twitter account that automatically posts a simple tweet.\n\nTo get started, you'll need these prerequisites:\n- GitLab account (self-hosted with GitLab Runner(s) set up or on GitLab.com)\n- Twitter API credentials\n\nOnce you've generated your Twitter API credentials, we can start building out our bot in GitLab. In this blog, we'll leverage GitLab's Web IDE based on Visual Studio Code, but feel free to use a code editor of your choice.\n\n## Step 1: Write a Python script to post tweets\n\n![Navigate to the Web IDE](https://about.gitlab.com/images/blogimages/2023-03-10-automating-a-twitter-bot-using-gitlab-cicd/web-ide.png){: .shadow}\n\nCreate a new blank project in GitLab, and click the \"Web IDE\" button to start writing some code. In the Web IDE, create a new file called `run_bot.py`, and paste the following code (this is where you interact with the Twitter API):\n\n```python\nimport tweepy\nimport config\n\ndef set_up():\n\tauth = tweepy.OAuthHandler(config.consumer_key, config.consumer_secret_key)\n\tauth.set_access_token(config.access_token, config.access_token_secret)\n\tapi = tweepy.API(auth)\n\treturn api\n\ndef run(tweet):\n\tapi = set_up()\n\tapi.update_status(tweet)\n\nrun('It\\'s Tanuki time')\n```\n\n**Note:** If you're familiar with Python, you'll notice that we're importing a file called `config` with some variables that we're using. 
This `config` file doesn't exist yet, but we'll create it from within a GitLab pipeline, leveraging CI/CD variables to securely store and use our Twitter API credentials.\n\nCreate another file called `requirements.txt`, and paste the following line:\n\n```\ntweepy\n```\n\nChanges to files in the Web IDE will be automatically saved, so switch to the Git tab and commit your changes.\n\n## Step 2: Create a CI/CD pipeline to run your Python script\n\nNext, we'll create a CI/CD pipeline script to run our Twitter bot and post a tweet every time the pipeline is run. To do this, you can:\n1. Create a new file using the Web IDE called `.gitlab-ci.yml`, or\n2. Head to your GitLab project, and from the sidebar, click CI/CD > Editor.\n\nIf you see some default text in the pipeline configuration, delete everything to start with a clean slate.\n\nIn the pipeline YAML file, we'll first specify the Docker image we want to run the bot on:\n\n```yaml\nimage: python:latest\n```\n\n**Note:** Normally in a pipeline, we would define stages first and then write jobs that are each assigned to a specific stage. Since we're only running one job in this pipeline, we don't need to specify stages at the top of our pipeline configuration file.\n\nNext, we'll add a job called `run` that runs the Python script we created in the previous step. Inside this job, we'll add a `script` section to run some commands that will execute our Python script.\n\n```yaml\nrun:\n  script:\n    - echo \"consumer_key = '$CONSUMER_KEY'\" >> config.py\n    - echo \"consumer_secret_key = '$CONSUMER_SECRET'\" >> config.py\n    - echo \"access_token = '$ACCESS_TOKEN'\" >> config.py\n    - echo \"access_token_secret = '$ACCESS_SECRET'\" >> config.py\n    - pip install -r requirements.txt\n    - python3 run_bot.py\n```\n\nCommit your changes. The pipeline will automatically run, since you just made a change to the project files, but it will fail. 
This is because we are calling some CI/CD variables in the pipeline, which we haven't set yet. Let's go ahead and do that!\n\n## Step 3: Set CI/CD variables to store API tokens\n\nHead to your GitLab project and from the sidebar, go to Settings > CI/CD.\n\nExpand the \"Variables\" section and add the `ACCESS_SECRET`, `ACCESS_TOKEN`, `CONSUMER_KEY`, and `CONSUMER_SECRET` variables as shown below (these are your Twitter API credentials):\n\n![CI/CD variables](https://about.gitlab.com/images/blogimages/2023-03-10-automating-a-twitter-bot-using-gitlab-cicd/ci-cd-variables.png){: .shadow}\n\nNote that the secrets are masked to prevent them from showing up in job logs (check the \"Mask variable\" box when creating/editing the variable).\n\n## Step 4: Test and schedule your Twitter bot\n\nNow that we've got everything set up, all we need to do is run the bot. Go to CI/CD > Pipelines, and click \"Run pipeline\". Click \"Run pipeline\" again, and wait for the `run` job to finish. If you've set up your Twitter credentials correctly, you should see that the pipeline successfully ran, and a tweet was posted on your bot account!\n\n![Schedule a pipeline](https://about.gitlab.com/images/blogimages/2023-03-10-automating-a-twitter-bot-using-gitlab-cicd/schedule-pipeline.png){: .shadow}\n\nOnce you've verified that your pipeline runs successfully, schedule your pipeline to automatically run at a regular interval. Go to CI/CD > Schedules, and click \"New schedule\". Feel free to use one of the default provided intervals, or use cron to set a custom schedule. Specify a timezone, and ensure that the \"Active\" checkbox is checked. Finally, click \"Save pipeline schedule\". You'll see that your pipeline has been scheduled to run, and when it will run next.\n\nAnd that's it! You now have a fully-functional Twitter bot running on GitLab, using CI/CD pipelines to automatically post tweets. 
While this demo Twitter bot simply posts a specified text message, you can add your own logic to [generate sentences using AI](https://linguatools.org/language-apis/sentence-generating-api/), [perform sentiment analysis on other users' tweets](https://www.analyticsvidhya.com/blog/2021/06/twitter-sentiment-analysis-a-nlp-use-case-for-beginners/), and more. Running a Twitter bot is just one of the many ways you can leverage pipelines in GitLab, and you can also check out some other [interesting use cases](https://docs.gitlab.com/ee/ci/examples/).\n",[789,721,9,766],{"slug":833,"featured":6,"template":701},"automating-a-twitter-bot-using-gitlab-cicd","content:en-us:blog:automating-a-twitter-bot-using-gitlab-cicd.yml","Automating A Twitter Bot Using Gitlab Cicd","en-us/blog/automating-a-twitter-bot-using-gitlab-cicd.yml","en-us/blog/automating-a-twitter-bot-using-gitlab-cicd",{"_path":839,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":840,"content":846,"config":857,"_id":859,"_type":14,"title":860,"_source":16,"_file":861,"_stem":862,"_extension":19},"/en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"title":841,"description":842,"ogTitle":841,"ogDescription":842,"noIndex":6,"ogImage":843,"ogUrl":844,"ogSiteName":686,"ogType":687,"canonicalUrls":844,"schema":845},"Building GitLab with GitLab: A multi-region service to deliver AI features","Discover how we built our first multi-region deployment for teams at GitLab using the platform's many features, helping create a frictionless developer experience for GitLab Duo users.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098664/Blog/Hero%20Images/Blog/Hero%20Images/building-gitlab-with-gitlab-no-type_building-gitlab-with-gitlab-no-type.png_1750098663794.png","https://about.gitlab.com/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": 
\"Article\",\n        \"headline\": \"Building GitLab with GitLab: A multi-region service to deliver AI features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chance Feick\"},{\"@type\":\"Person\",\"name\":\"Sam Wiskow\"}],\n        \"datePublished\": \"2024-09-12\",\n      }",{"title":841,"description":842,"authors":847,"heroImage":843,"date":850,"body":851,"category":741,"tags":852},[848,849],"Chance Feick","Sam Wiskow","2024-09-12","For GitLab Duo, real-time AI-powered capabilities like [Code\nSuggestions](https://about.gitlab.com/solutions/code-suggestions/) need\nlow-latency response times for a frictionless developer experience. Users\ndon’t want to interrupt their flow and wait for a code suggestion to show\nup. To ensure GitLab Duo can provide the right suggestion at the right time\nand meet high performance standards for critical AI infrastructure, GitLab\nrecently launched our first multi-region service to deliver AI features.\n\n\nIn this article, we will cover the benefits of multi-region services, how we\nbuilt an internal platform codenamed ‘Runway’ for provisioning and deploying\nmulti-region services using GitLab features, and the lessons learned\nmigrating to multi-region in production.\n\n\n## Background on the project\n\n\nRunway is GitLab’s internal platform as a service (PaaS) for provisioning,\ndeploying, and operating containerized services. Runway's purpose is to\nenable GitLab service owners to self-serve infrastructure needs with\nproduction readiness out of the box, so application developers can focus on\nproviding value to customers. 
As part of [our corporate value of\ndogfooding](https://handbook.gitlab.com/handbook/values/#results), the first\niteration was built in 2023 by the Infrastructure department on top of core\nGitLab capabilities, such as continuous integration/continuous delivery\n([CI/CD](https://about.gitlab.com/topics/ci-cd/)), environments, and\ndeployments.\n\n\nBy establishing automated GitOps best practices, Runway services use\ninfrastructure as code (IaC), merge requests (MRs), and CI/CD by default.\n\n\nGitLab Duo is primarily powered by [AI\nGateway](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist),\na satellite service written in Python outside of GitLab’s modular monolith\nwritten in Ruby. In cloud computing, a region is a geographical location of\ndata centers operated by cloud providers.\n\n\n## Defining a multi-region strategy\n\n\nDeploying in a single region is a good starting point for most services, but\ncan come with downsides when you are trying to reach a global audience.\nUsers who are geographically far from where your service is deployed may\nexperience different levels of service and responsiveness than those who are\ncloser. This can lead to a poor user experience, even if your service is\nwell built in all other respects.\n\n\nFor AI Gateway, it was important to meet global customers wherever they are\nlocated, whether on GitLab.com or self-managed instances using Cloud\nConnector. When a developer is deciding to accept or reject a code\nsuggestion, milliseconds matter and can define the user experience.\n\n\n### Goals\n\n\nMulti-region deployments require more infrastructure complexity, but for use\ncases where latency is a core component of the user experience, the benefits\noften outweigh the downsides. First, multi-region deployments offer\nincreased responsiveness to the user. By serving requests from locations\nclosest to end users, latency can be significantly reduced. 
Second,\nmulti-region deployments provide greater availability. With fault tolerance,\nservices can fail over during a regional outage. There is a much lower\nchance of a service failing completely, meaning users should not be\ninterrupted even in partial failures.\n\n\nBased on our goals for performance and availability, we used this\nopportunity to create a scalable multi-region strategy in Runway, which is\nbuilt leveraging GitLab features.\n\n\n### Architecture\n\n\nIn SaaS platforms, GitLab.com’s infrastructure is hosted on Google Cloud\nPlatform (GCP). As a result, Runway’s first supported platform runtime is\nCloud Run. The initial workloads deployed on Runway are stateless satellite\nservices (e.g., AI Gateway), so Cloud Run services are a good fit that\nprovide a clear migration path to more complex and flexible platform\nruntimes, e.g. Kubernetes.\n\n\nBuilding Runway on top of GCP Cloud Run using GitLab has allowed us to\niterate and tease out the right level of abstractions for service owners as\npart of a platform play in the Infrastructure department.\n\n\nTo serve traffic from multiple regions in Cloud Run, the multi-region\ndeployment strategy must support global load balancing, and the provisioning\nand configuration of regional resources. Here’s a simplified diagram of the\nproposed architecture in GCP:\n\n\n![simplified diagram of the proposed architecture in\nGCP](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098671/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098671612.png)\n\n\nBy replicating Cloud Run services across multiple regions and configuring\nthe existing global load balancing with serverless network endpoint group\n(NEG) backends, we’re able to serve traffic from multiple regions. 
For the\nremainder of the article, we’ll focus less on specifics of Cloud Run and\nmore on how we’re building with GitLab.\n\n\n## Building a multi-region platform with GitLab\n\n\nNow that you have context about Runway, let's walk through how to build a\nmulti-region platform using GitLab features.\n\n\n### Provision\n\n\nWhen building an internal platform, the first challenge is provisioning\ninfrastructure for a service. In Runway, Provisioner is the component that\nis responsible for maintaining a service inventory and managing IaC for GCP\nresources using Terraform.\n\n\nTo provision a service, an application developer will open an MR to add a\nservice project to the inventory using git, and Provisioner will create\nrequired resources, such as service accounts and identity and access\nmanagement policies. When building this functionality with GitLab, Runway\nleverages [OpenID Connect (OIDC) with GPC Workload Identity\nFederation](https://docs.gitlab.com/ee/ci/cloud\\_services/google\\_cloud/)\nfor managing IaC.\n\n\nAdditionally, Provisioner will create a deployment project for each service\nproject. The purpose of creating separate projects for deployments is to\nensure the [principle of least\nprivilege](https://about.gitlab.com/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab/)\nby authenticating as a GCP service account with restricted permissions.\nRunway leverages the [Projects\nAPI](https://docs.gitlab.com/ee/api/projects.html) for creating projects\nwith [Terraform\nprovider](https://registry.terraform.io/providers/gitlabhq/gitlab/latest/docs).\n\n\nFinally, Provisioner defines variables in the deployment project for the\nservice account, so that deployment CI jobs can authenticate to GCP. 
Runway\nleverages [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) and\n[Job Token\nallowlist](https://docs.gitlab.com/ee/ci/jobs/ci\\_job\\_token.html\\#add-a-group-or-project-to-the-job-token-allowlist)\nto handle authentication and authorization.\n\n\nHere’s a simplified example of provisioning a multi-region service in the\nservice inventory:\n\n\n```\n\n{\n  \"inventory\": [\n    {\n      \"name\": \"example-service\",\n      \"project_id\": 46267196,\n      \"regions\": [\n        \"europe-west1\",\n        \"us-east1\",\n        \"us-west1\"\n      ]\n    }\n  ]\n}\n\n```\n\n\nOnce provisioned, a deployment project and necessary infrastructure will be\ncreated for a service.\n\n\n### Configure\n\n\nAfter a service is provisioned, the next challenge is the configuration for\na service. In Runway,\n[Reconciler](https://gitlab.com/gitlab-com/gl-infra/platform/runway/runwayctl)\nis a component that is responsible for configuring and deploying services by\naligning the actual state with the desired state using Golang and Terraform.\n\n\nHere’s a simplified example of an application developer configuring GitLab\nCI/CD in their service project:\n\n\n```\n\n# .gitlab-ci.yml\n\nstages:\n  - validate\n  - runway_staging\n  - runway_production\n\ninclude:\n  - project: 'gitlab-com/gl-infra/platform/runway/runwayctl'\n    file: 'ci-tasks/service-project/runway.yml'\n    inputs:\n      runway_service_id: example-service\n      image: \"$CI_REGISTRY_IMAGE/${CI_PROJECT_NAME}:${CI_COMMIT_SHORT_SHA}\"\n      runway_version: v3.22.0\n\n# omitted for brevity\n\n```\n\n\nRunway provides sane default values for configuration that are based on our\nexperience in delivering stable and reliable features to customers.\nAdditionally, service owners can configure infrastructure using a service\nmanifest file hosted in a service project. The service manifest uses JSON\nSchema for validation. 
When building this functionality with GitLab, Runway\nleverages [Pages](https://docs.gitlab.com/ee/user/project/pages/) for schema\ndocumentation.\n\n\nTo deliver this part of the platform, Runway leverages [CI/CD\ntemplates](https://docs.gitlab.com/ee/development/cicd/templates.html),\n[Releases](https://docs.gitlab.com/ee/user/project/releases/), and\n[Container\nRegistry](https://docs.gitlab.com/ee/user/packages/container\\_registry/) for\nintegrating with service projects.\n\n\nHere’s a simplified example of a service manifest:\n\n\n```\n\n# .runway/runway-production.yml\n\napiVersion: runway/v1\n\nkind: RunwayService\n\nspec:\n container_port: 8181\n regions:\n   - us-east1\n   - us-west1\n   - europe-west1\n\n# omitted for brevity\n\n```\n\n\nFor multi-region services, Runway injects an environment variable into the\ncontainer instance runtime, e.g. RUNWAY\\_REGION, so application developers\nhave the context to make any downstream dependencies regionally-aware, e.g.\nVertex AI API.\n\n\nOnce configured, a service project will be integrated with a deployment\nproject.\n\n\n### Deploy\n\n\nAfter a service project is configured, the next challenge is deploying a\nservice. In Runway, Reconciler handles this by triggering a deployment job\nin the deployment project when an MR is merged to the main branch. When\nbuilding this functionality with GitLab, Runway leverages [Trigger\nPipelines](https://docs.gitlab.com/ee/ci/triggers/) and [Multi-Project\nPipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream\\_pipelines.html\\#multi-project-pipelines)\nto trigger jobs from service project to deployment project.\n\n\n![trigger jobs from service project to deployment\nproject](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098671612.png)\n\n\nOnce a pipeline is running in a deployment project, it will be deployed to\nan environment. 
By default, Runway will provision staging and production\nenvironments for all services. At this point, Reconciler will apply any\nTerraform resource changes for infrastructure. When building this\nfunctionality with GitLab, Runway leverages\n[Environments/Deployments](https://docs.gitlab.com/ee/ci/environments/) and\n[GitLab-managed Terraform\nstate](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform\\_state.html)\nfor each service.\n\n\n![Reconciler applies any Terraform resource changes for\ninfrastructure](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098671614.png)\n\n\nRunway provides default application metrics for services. Additionally,\ncustom metrics can be used by enabling a sidecar container with\nOpenTelemetry Collector configured to scrape Prometheus and remote write to\nMimir. By providing observability out of the box, Runway is able to bake\nmonitoring into CI/CD pipelines.\n\n\nExample scenarios include gradual rollouts for blue/green deployments,\npreventing promotions to production when staging is broken, or automatically\nrolling back to previous revision when elevated error rates occur in\nproduction.\n\n\n![Runway bakes monitoring into CI/CD\npipelines](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098671615.png)\n\n\nOnce deployed, environments will serve the latest revision of a service. At\nthis point, you should have a good understanding of some of the challenges\nthat will be encountered, and how to solve them with GitLab features.\n\n\n## Migrating to multi-region in production\n\n\nAfter extending Runway components to support multi-region in Cloud Run, the\nfinal challenge was migrating from AI Gateway’s single-region deployment in\nproduction with zero downtime. 
Today, teams using Runway to deploy their\nservices can self-serve on regions making a multi-region deployment just as\nsimple as a single-region deployment. \n\n\nWe were able to iterate on building multi-region functionality without\nimpacting existing infrastructure by using semantic versioning for Runway.\nNext, we’ll share some learnings from the migration that may inform how to\noperate services for an internal multi-region platform.\n\n\n### Dry run deployments\n\n\nIn Runway, Reconciler will apply Terraform changes in CI/CD. The trade-off\nis that plans cannot be verified in advance, which could risk inadvertently\ndestroying or misconfiguring production infrastructure. To solve this\nproblem, Runway will perform a “dry run” deployment for MRs.\n\n\n![\"Dry run\"\ndeployment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098671616.png)\n\n\nFor migrating AI Gateway, dry run deployments increased confidence and\nhelped mitigate risk of downtime during rollout. When building an internal\nplatform with GitLab, we recommend supporting dry run deployments from the\nstart.\n\n\n### Regional observability\n\n\nIn Runway, existing observability was aggregated by assuming a single-region\ndeployment. To solve this problem, Runway observability was retrofitted to\ninclude a new region label for Prometheus metrics.\n\n\nOnce metrics were retrofitted, we were able to introduce service level\nindicators (SLIs) for both regional Cloud Run services and global load\nbalancing. 
Here’s an example dashboard screenshot for a general Runway\nservice:\n\n\n![dashboard screenshot for a general Runway\nservice](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098671617.png)\n\n\n***Note:** Data is not actual production data and is only for illustration\npurposes.*\n\n\nAdditionally, we were able to update our service level objectives (SLOs) to\nsupport regions. As a result, service owners could be alerted when a\nspecific region experiences an elevated error rate, or increase in response\ntimes.\n\n\n![screenshot of\nalerts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098671617.png)\n\n\n***Note:** Data is not actual production data and is only for illustration\npurposes.*\n\n\nFor migrating AI Gateway, regional observability increased confidence and\nhelped provide more visibility into new infrastructure. When building an\ninternal platform with GitLab, we recommend supporting regional\nobservability from the start.\n\n\n### Self-service regions\n\n\nThe Infrastructure department successfully performed the initial migration\nof multi-region support for AI Gateway in production with zero downtime.\nGiven the risk associated with rolling out a large infrastructure migration,\nit was important to ensure the service continued working as expected.\n\n\nShortly afterwards, service owners began self-serving additional regions to\nmeet the growth of customers. At the time of writing, [GitLab\nDuo](https://about.gitlab.com/gitlab-duo/) is available in six regions\naround the globe and counting. Service owners are able to configure the\ndesired regions, and Runway will provide guardrails along the way in a\nscalable solution.\n\n\nAdditionally, three other internal services have already started using\nmulti-region functionality on Runway. 
Application developers have entirely\nself-served functionality, which validates that we’ve provided a good\nplatform experience for service owners. For a platform play, a scalable\nsolution like Runway is considered a good outcome since the Infrastructure\ndepartment is no longer a blocker.\n\n\n## What’s next for Runway\n\n\nBased on how quickly we could iterate to provide results for customers, the\nSaaS Platforms department has continued to invest in Runway. We’ve grown the\nRunway team with additional contributors, started evolving the platform\nruntime (e.g. Google Kubernetes Engine), and continue dogfooding with\ntighter integration in the product.\n\n\nIf you’re interested in learning more, feel free to check out\n[https://gitlab.com/gitlab-com/gl-infra/platform/runway](https://gitlab.com/gitlab-com/gl-infra/platform/runway).\n\n\n## More Building GitLab with GitLab\n\n- [Why there is no MLOps without\nDevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)\n\n- [Stress-testing Product\nAnalytics](https://about.gitlab.com/blog/building-gitlab-with-gitlab-stress-testing-product-analytics/)\n\n- [Web API Fuzz\nTesting](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/)\n\n- [How GitLab.com inspired\nDedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)\n\n- [Expanding our security certification\nportfolio](https://about.gitlab.com/blog/building-gitlab-with-gitlab-expanding-our-security-certification-portfolio/)\n",[109,9,721,853,766,743,854,855,789,856],"inside GitLab","google","git","AI/ML",{"slug":858,"featured":91,"template":701},"building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","content:en-us:blog:building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","Building Gitlab With Gitlab A Multi Region Service To Deliver Ai 
Features","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"_path":864,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":865,"content":871,"config":878,"_id":880,"_type":14,"title":881,"_source":16,"_file":882,"_stem":883,"_extension":19},"/en-us/blog/cd-automated-integrated",{"title":866,"description":867,"ogTitle":866,"ogDescription":867,"noIndex":6,"ogImage":868,"ogUrl":869,"ogSiteName":686,"ogType":687,"canonicalUrls":869,"schema":870},"GitLab’s automated and integrated continuous delivery","Learn about how the power of GitLab Auto DevOps can help increase productivity and speed up releases.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681872/Blog/Hero%20Images/CD-2st-mkt-diff-cover-1275x849.jpg","https://about.gitlab.com/blog/cd-automated-integrated","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab’s automated and integrated continuous delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-01-22\",\n      }",{"title":866,"description":867,"authors":872,"heroImage":868,"date":874,"body":875,"category":695,"tags":876},[873],"Cesar Saavedra","2021-01-22","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nOrganizations adopting DevOps best practices to software delivery spend time and effort designing, building, testing, integrating, and maintaining CI/CD pipelines for their different projects. Just as they must spend some of their time maintaining their business applications instead of innovating, they must do the same for their pipelines. 
Freeing your developers so that they can spend more of their time creating new business applications and differentiating value to the business is of utmost importance to remain competitive in a world where organizations must be digital leaders to succeed in the marketplace.\n\nGitLab provides [Auto DevOps](/topics/devops/), which are prescribed out-of-the-box CI/CD templates that auto-discover the source code you have. Based on best practices, they automatically detect, build, test, deploy, and monitor your applications. Auto DevOps save your developers from implementing their own pipelines so that they can spend more time innovating. In the following paragraphs, we go over how the power of Auto DevOps automates and integrates your continuous delivery to help increase productivity and speed up releases.\n\n## Enabling Auto DevOps\n\nIt’s very easy to enable Auto DevOps for your application. All you need to do is go to your Project Settings and select the configuration you desire for Auto DevOps. As the picture below depicts, you can select the deployment strategy to “Automatic deployment to staging, manual deployment to production”:\n\n![autodevops-on](https://about.gitlab.com/images/blogimages/cd-automated-integrated/autodevops-on.png){: .shadow.medium.center.wrap-text}\n\nThe Auto DevOps pipeline shifts work left to find and prevent defects as early as possible in the software delivery process.\n\nThe pipeline then deploys the application to staging for verification and then to production in an incremental fashion. Auto DevOps saves you and your developers from implementing your own pipelines so that you can spend more time innovating.\n\n## Auto DevOps stages and jobs\n\nThe stages and jobs of the Auto DevOps pipeline vary according to the way you configured it. You can also customize the prescribed Auto DevOps pipeline or reuse only portions of it. Let’s review the prescribed stages and jobs for a simple Java application.\n\n1) First you find the Build stage. 
Auto Build creates a build of the application using an existing Dockerfile or buildpacks. The resulting Docker image is pushed to the built-in Container Registry. \n\n![auto-build](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-build.png){: .shadow.medium.center.wrap-text}\n\nAll these steps are automatically executed on your application so that you can spend more time delivering value to the business.\n\n2) Next is a variety of tests under the Test stage. Auto DevOps includes jobs for static analysis and code checks, For identifying security issues in containers, For analyzing project dependencies and security issues, For scanning license dependencies, For detecting credentials and secrets exposure, For running security analysis of Java code, And for specific unit tests for the language and framework.\n\n![auto-test](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-test.png){: .shadow.medium.center.wrap-text}\n\nAll these tests increase the quality of code, compliance and reliability that translate into a highly resilient production environment.\n\n3) The review stage contains a single job that spins up an ephemeral environment to be used by the Dynamic Application Security Testing or DAST. Likewise the Dast stage has the job, Auto Dynamic Application Security Testing, which analyzes the current code and checks for potential security issues by running (Open Web Application Security Project) OWASP-related tests.\n\n![auto-review-dast](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-review-dast.png){: .shadow.medium.center.wrap-text}\n\n4) The prescribed stages and jobs in Auto DevOps vary depending on how you configure it. In this example, the user has selected “Automatic deployment to staging, manual deployment to production” when enabling Auto DevOps, so towards the CD portion of the pipeline, we see the staging stage, which contains a single job. 
The staging job deploys the user’s application to the staging environment. It will also instantiate the staging environment, if needed.\n\n![auto-staging](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-staging.png){: .shadow.medium.center.wrap-text}\n\n5) The production stage is manual and contains four jobs to incrementally deploy his application to production. An incremental rollout decreases the risk of a production outage or downtime. By releasing production changes gradually, error rates or performance degradation can be monitored, and if there are no problems, all of production can be updated.\n\n![auto-prod](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-prod.png){: .shadow.medium.center.wrap-text}\n\n6) The user has been prescribed a performance stage with a single job with the same name. Auto Browser Performance Testing measures the browser performance of each web page and reports on any degradation or improvement so that appropriate action can be taken.\n\n![auto-browser-perf](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-browser-perf.png){: .shadow.medium.center.wrap-text}\n\n7) The last stage is the cleanup stage, which contains a job that brings down and frees all resources of the ephemeral DAST environment that was brought up earlier in the CI portion of the pipeline.\n\n![auto-cleanup](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-cleanup.png){: .shadow.medium.center.wrap-text}\n\nThis entire prescribed CI/CD pipeline, with all its stages and jobs, is based on best practices and is automatically run for the user’s project saving them time and effort from developing their own pipeline.\n\n## Auto Review Apps\n\nAs developers collaborate on a project, Auto DevOps automatically includes Auto Review Apps, which stands up an ephemeral environment for stakeholders to review the running application with proposed changes before they are merged to the main 
branch. The teardown and freeing of the resources of the ephemeral review environment are also automatically done by Auto DevOps once the merge takes place.\n\n![auto-review-apps](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-review-apps.png){: .shadow.medium.center.wrap-text}\n\n## Modifying Auto DevOps\n\nHere are some ways that you can modify Auto DevOps.\n\n1) **Customization via environment variables**. If you would like to skip some of the stages and jobs in Auto DevOps, you can do this via project variables. For example, say you are using all open source licensed software within your project and you are pretty confident about your web application performance, and you’d also like to add the ability to do canary deployments. You can customize Auto DevOps via environment variables to skip the license-scanning and performance jobs and add canary deployments to your project by creating and setting specific environment variables as shown below.\n\n![auto-env-vars](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-env-vars.png){: .shadow.medium.center.wrap-text}\n\nYou could also use the GitLab APIs to script these modifications if he so desired.\n\n2) **Customization by editing the DevOps pipeline**. Another way to customize the Auto DevOps pipeline is by adding it to your own project and then making changes to it.\nBelow you can see a screen snapshot of an Auto DevOps pipeline edit where LICENSE_MANAGEMENT and web PERFORMANCE tests are being disabled.\n\n![autodevops-pipeline-edit](https://about.gitlab.com/images/blogimages/cd-automated-integrated/autodevops-pipeline-edit.png){: .shadow.medium.center.wrap-text}\n\n3) **Customization by using only portions of Auto DevOps**. You could also leverage portions of Auto DevOps in your own pipeline by including specific templates. 
In the smaller pipeline below, only the Auto Build and Auto Test capabilities of Auto DevOps are being reused.\n\n![autodevops-portions](https://about.gitlab.com/images/blogimages/cd-automated-integrated/autodevops-portions.png){: .shadow.medium.center.wrap-text}\n\nThe power of Auto DevOps automates and integrates your continuous delivery to help speed up your releases by saving you time from having to write your own pipelines. By using Auto DevOps you can accelerate your product delivery times and bring differentiating application features faster to market.\n\nIf you’d like to see the power of GitLab Auto DevOps in action, watch this [video](https://youtu.be/blJT8f6ZDH8).\n\nFor more information, visit [LEARN@GITLAB](https://about.gitlab.com/learn/).\n\nPhoto by [Tim Carey](https://unsplash.com/@baudy?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/formula-1?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[9,765,877],"demo",{"slug":879,"featured":6,"template":701},"cd-automated-integrated","content:en-us:blog:cd-automated-integrated.yml","Cd Automated Integrated","en-us/blog/cd-automated-integrated.yml","en-us/blog/cd-automated-integrated",{"_path":885,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":886,"content":892,"config":897,"_id":899,"_type":14,"title":900,"_source":16,"_file":901,"_stem":902,"_extension":19},"/en-us/blog/cd-solution-overview",{"title":887,"description":888,"ogTitle":887,"ogDescription":888,"noIndex":6,"ogImage":889,"ogUrl":890,"ogSiteName":686,"ogType":687,"canonicalUrls":890,"schema":891},"How to use GitLab tools for continuous delivery","Learn how to use GitLab technology to release software faster and with less risk.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682956/Blog/Hero%20Images/CD-continuous-nature-cover-880x586.jpg","https://about.gitlab.com/blog/cd-solution-overview","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab tools for continuous delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2020-12-17\",\n      }",{"title":887,"description":888,"authors":893,"heroImage":889,"date":894,"body":895,"category":741,"tags":896},[873],"2020-12-17","\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-04-01.\n\nEach organization is unique in how they adopt continuous delivery (CD) principles, but the journey to modernize and enhance your software release process can be conducted in phases. In this blog post, we unpack some of the tools companies can use to adopt continuous delivery (CD), and explain how companies can reach continuous delivery in three key stages. The good news is, regardless of how you get there, GitLab offers a solution that allows companies to modernize their release process at their own pace and in their own way.\n\n## Consolidate disparate tools into a single platform\n\nThe first step to reaching [continuous delivery](/topics/continuous-delivery/) is to consolidate the number of disparate tools in your pipeline by using the tools and capabilities baked into the GitLab product. In this section, we summarize some of the fundamental components of GitLab and give examples of how they work.\n\nGitLab users can track issues and merge requests using [milestones](https://docs.gitlab.com/ee/user/project/milestones/#milestones), which also help with setting time-bound goals. 
Milestones can be used as Agile sprints and releases, and allow you to organize issues and merge requests into a one group, with an optional start date and an optional due date.\n\n![Example of GitLab milestone from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/milestone.png)\nScreenshot shows example milestone in GitLab.\n\n[Issues are a fundamental tool in GitLab](https://docs.gitlab.com/ee/user/project/issues/#issues), and include many components to help users communication information about product problems, new features, and more.\n\n[Merge requests (MRs) are created to merge one branch into another](https://docs.gitlab.com/ee/user/project/merge_requests/). MRs are also where solutions are developed and is a key input to the release planning process.\n\nBoth issues and MRs are core components of a release and allow for the audit and tracking of application changes created by a large group of DevOps engineers, system administrators, and developers. We often use Epics in the release planning process. [Epics are used to track groups of issues with the same theme](https://docs.gitlab.com/ee/user/group/epics/#epics). In the example below, an Epic was created for all the UI-related issues in a project.\n\n![Example of GitLab epic for frontend work](https://about.gitlab.com/images/blogimages/cd-solution-overview/epic.png)\nAn example of an Epic for frontend work in GitLab.\n\n[Iterations are a relatively new tool that allows users to track issues over time](https://docs.gitlab.com/ee/user/group/iterations/#iterations) and helps to track velocity and volatility metrics. 
Iterations can also be used with milestones and can track a project's sprints using the detailed iterations pages, which include many progress metrics.\n\n![Example iteration from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/iteration.png)\nThis screenshot shows an example of how iterations work in GitLab.\n\nThe [Roadmap tool assembles epics, milestones, and iterations in a timeline format](https://docs.gitlab.com/ee/user/group/roadmap/#roadmap), which makes it easier to visually track all progress toward a release and helps the user streamline the release process.\n\n![Example of roadmap from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/roadmap.png)\nThis screenshot shows an example of roadmap in GitLab.\n\nGitLab offers many approval gates for your release. Set a [deploy freeze window](https://docs.gitlab.com/ee/ci/environments/deployment_safety.html) to temporarily suspend automated deployments to production. The deploy freeze window prevents unintended production releases during a particular time frame to help reduce uncertainty and risk of unscheduled outages.\n\n![Example of deploy freeze window from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/freeze.png)\nThis screenshow shows an example deploy freeze window in GitLab.\n\nRelated to the deploy freeze window, users can protect the production environment for a release to prevent unintentional releases. Deploy freeze windows protect the production environment by specifying who is allowed to deploy to the environment. Assigning specific roles and responsibilities streamlines the approval gates and release process.\n\n![protected-env](https://about.gitlab.com/images/blogimages/cd-solution-overview/protected-env.png)\n\nWhen it's ready, the [user can create the release which automatically generates the release evidence](https://docs.gitlab.com/ee/api/releases/#collect-release-evidence). 
This streamlined process helps reduce release cycle times.\n\n![Example of release evidence from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/release-evidence.png)\nHere is an example of release evidence from a demo project in GitLab.\n\n## Implement continuous delivery\n\nThe capabilities described above help to establish some best practices for software continuous delivery. In this next phase of the CD cycle, every change is automatically deployed to the User Acceptance Testing env/Staging (with a manual deployment to production). In this scenario, there is no need for a deploy freeze, and the release manager can cut a release from staging at any point in time.\n\n[GitLab Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) helps users automatically create the release pipeline and relieves them from manually creating a pipeline. With Auto DevOps, users can automatically deploy to the staging environment and manually deploy to production and enable canary deployments. 
Auto DevOps, which is based on DevOps best practices, helps you streamline the release process.\n\n![Example of enabling Auto DevOps from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/enable-auto-devops.png)\nHow to enable Auto DevOps in GitLab.\n\nThe first job in Auto DevOps is the build job, as shown below:\n\n![build-job](https://about.gitlab.com/images/blogimages/cd-solution-overview/build-job.png)\nThe build job in GitLab Auto DevOps.\n\nThe build job applies the appropriate build strategy to create a Docker image of the application and stores it in the built-in Docker Registry.\n\n![Example of container registry from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/container-registry.png)\nSee the example of a container registry in GitLab.\n\nFaster and more reliable releases happen when you have build components like Docker images that are consistent, uniform, and readily available throughout the release process. GitLab also includes a built-in [Package Registry](https://docs.gitlab.com/ee/user/packages/) that supports many packaging technologies.\n\n![Example of package registry from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/package-registry.png)\nHere's what the package registry looks like in GitLab.\n\n[Review Apps](https://docs.gitlab.com/ee/ci/review_apps/#review-apps) allow the user to visualize what features will go into production. As updates are made to the application via MRs, the MRs kick off Review Apps, which streamlines the review process, including the automatic creation and destruction of an ephemeral review environment. Using Review Apps, stakeholders can verify the updates to the application before the changes are merged to the main line. 
Review Apps help increase code quality reducing the risk of unexpected production outages.\n\n![Example Review Apps from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/review-apps.png)\nAn example of Review Apps in GitLab from a demo project.\n\nOnce an application is built and passes many automated tests, checks and verifications, the Auto DevOps pipeline automatically stands up a staging environment and deploys the application to staging.\n\n![Example staging environment from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/staging-env.png)\nAn example staging environment in GitLab.\n\nAt this point, a user can manually deploy the updated application as a canary deployment to the production environment. In doing so, a user ships features to only a portion of the pods fleet and watches their behavior as users visit the temporarily deployed feature. If everything checks out, the next step is to deploy the feature to production. After deploying to production, roll out the Canary deployment to 50% of the production pods. Incremental rollouts lower the risk of production outages and delivers a better user experience and customer satisfaction. Advanced deployment techniques, like canary, incremental, and Blue-Green also improve development and delivery efficiency, and streamlines the release process.\n\n![Example incremental rollout from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/rollout.png)\nHow incremental rollout works in GitLab.\n\n![live-env-button](https://about.gitlab.com/images/blogimages/cd-solution-overview/live-env-button.png)\nTo check the running application for integrity, you can click on the \"Open live environment\" button.\n\nClicking this button will open up the application in a different browser tab. But what if you run into an application error? 
As shown below:\n\n![Example application error from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/app-error.png)\nThis is what an application error will look like in GitLab.\n\nIf you encounter an app error, you could decide to perform a rollback by drilling down into the production environment page and identifying the release that had been running before the last deployment. This page is an auditable sequence of changes that have been applied to the production environment. The rollback process starts with the click of a button. Rollbacks speed up recovery of production in case of failures and lowers outage times, which improves the user experience.\n\n![Example rollback from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/rollback.png)\nRollback in GitLab to speed up production recovery.\n\nPipelines usually run automatically, but to schedule a pipeline once a day at midnight, for example, so staging can have the most recent version of the application each day, go to CI/CD->Schedules. Scheduling pipelines can improve the efficiency of the development life cycle and release processes.\n\n![Example of pipeline scheduling from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/pipeline-sched.png)\nHow to schedule a pipeline to run in the future.\n\nWhile the application is running in production, track how the release is performing and quickly identify and troubleshoot any production issues. There are a few ways to do this. One way is to access the \"Monitoring\" feature for a specific environment to track system and application metrics, such as system and pod memory usage, and the number of cores used. 
The monitoring tracking includes markers (small rocket icon) when updates were introduced to the environment, so that fluctuations in the metrics can be correlated to a specific update.\n\n![Example monitoring capabilities from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/monitoring.png)\nExplore monitoring capabilities in GitLab.\n\nMonitoring reduces the time to identify, resolve and preempt production problems, which lowers the risk of unscheduled outages. It also provides an opportunity for monitoring business activity and optimizes cloud costs. This type of monitoring is not only useful to release managers but also to DevOps engineers, application operators, and platform engineers.\n\nAnother way to monitor the release is by creating alerts to detect out-of-range metrics, which are visible on the overall operations metrics dashboard as well as on each specific environment window. Alerts can also automatically trigger ChatOps and email messages to appropriate individuals or groups.\n\n![Example alerts from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/alerts.png\nExample alerts in GitLab.\n\nYou can manage alerts from the [Operations Alerts window](https://docs.gitlab.com/ee/operations/incident_management/alerts.html), a single location from which you can assess and handle alerts, which may include the manual or automatic rollback of a release.\n\n![Example alerts dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/alerts-window.png)\nWhat the he alerts dashboard looks like on GitLab.\n\nUsers can track and monitor the release progress through [Value Stream Analytics](https://docs.gitlab.com/ee/development/value_stream_analytics.html#value-stream-analytics-development-guide), where you can check your project or group statistics over time and see how your team improves in the number of new issues, commits, deploys, and deployment frequency. 
Value Stream Analytics is useful to quickly determine the velocity of a given project. It points to bottlenecks in the development process, allowing management to uncover, triage, and identify the root cause of slowdowns in the software development life cycle.\n\n![Example value stream analytics from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/value-stream.png)\nValue stream analytics in GitLab.\n\nLastly, another way to track and monitor the release is through [Pipeline analytics](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html#pipeline-success-and-duration-charts). Pipeline analytics shows the history of your pipeline successes and failures, as well as how long each pipeline runs. This helps explain the health of your projects and their continuous delivery.\n\n![Example pipeline analytics from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/pipeline-analytics.png)\nScreenshot shows example pipeline analytics in GitLab.\n\nThe [Operations dashboard](https://docs.gitlab.com/ee/user/operations_dashboard/#operations-dashboard) can contain more than one project, and allows users to oversee more than one release. 
This dashboard provides a summary of each project's operational health, including pipeline and alert status.\n\n![Example operations dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/ops-dashboard.png)\nExample of operations dashboard in GitLab.\n\nRelease managers can also access the [environments dashboard](https://docs.gitlab.com/ee/ci/environments/environments_dashboard.html#environments-dashboard) to provide a cross-project, environment-based view that lets you see the big picture of what is happening in each environment.\n\n![Example environments dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/env-dashboard.png)\nThe environments dashboard in GitLab.\n\nAnother option is to drill down into a specific environment to see all the updates applied to the environment.\n\n![Example production environment dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/prod-env-dashboard.png)\nThe production environment dashboards shows all updates applied to the environment.\n\nAll these dashboards offer operations insights that are necessary to understand how a release is performing in production and quickly identify and troubleshoot any production issues.\n\n## Implement continuous deployment\n\nThe third phase in the journey is continuous deployment, where users can send updates directly to production. Instead of manually triggering deplyments, continuous deployment sends changes to production production auomatically (no human intervention is required). Teams can only achieve continuous deployment once continuous delivery is already in place.\n\nTo introduce a feature to a segment of end-users in a controlled manner in production, create [feature flags](/blog/feature-flags-continuous-delivery/). 
Feature flags help reduce risk and let the user conduct controlled tests and separate feature delivery from customer launch.\n\n![Example feature flag from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/feature-flag.png)\nFeatures flags in GitLab.\n\nA project's audit events dashboard will record what user introduced a feature flag.\n\n![Example audit events dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/events-dashboard.png)\nScreenshot shows example audit events dashboard in GitLab.\n\nCheck security and compliance-related items of the project by visiting the [Security dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/#gitlab-security-dashboards-and-security-center).\n\n![Example security dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/sec-dashboard.png)\nThe security dashboard in GitLab.\n\nThese dashboards help you preempt out-of-compliance scenarios to avoid penalties. They also streamline audits, provide an opportunity to optimize cost, and lower risk of unscheduled production outages.\n\nWe have reviewed how GitLab can help you make your releases safe, low risk, worry-free, consistent, and repeatable.\n\nWhether you are just starting your journey into DevOps, or already in the midst of implementing DevOps processes, [GitLab's continuous delivery](/solutions/continuous-integration/) can help you every step of the way with capabilities built on DevOps and CD best practices.\n\n## Watch and learn\n\nMore of a video person? 
Tune in below to see GitLab’s continuous delivery solution in action.\n\n\u003Chttps://www.youtube-nocookie.com/embed/L0OFbZXs99U>\n\nFor more information, visit [LEARN@GITLAB](/learn/).\n",[9,765,877],{"slug":898,"featured":6,"template":701},"cd-solution-overview","content:en-us:blog:cd-solution-overview.yml","Cd Solution Overview","en-us/blog/cd-solution-overview.yml","en-us/blog/cd-solution-overview",{"_path":904,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":905,"content":911,"config":918,"_id":920,"_type":14,"title":921,"_source":16,"_file":922,"_stem":923,"_extension":19},"/en-us/blog/changes-to-the-preclonescript",{"title":906,"description":907,"ogTitle":906,"ogDescription":907,"noIndex":6,"ogImage":908,"ogUrl":909,"ogSiteName":686,"ogType":687,"canonicalUrls":909,"schema":910},"Guide to pre_clone_script changes on GitLab SaaS Linux Runners","Learn about the change from CI_PRE_CLONE_SCRIPT to pre_get_sources_script on GitLab SaaS Linux Runners.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664087/Blog/Hero%20Images/tanukicover.jpg","https://about.gitlab.com/blog/changes-to-the-preclonescript","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Guide to pre_clone_script changes on GitLab SaaS Linux Runners\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2023-03-27\",\n      }",{"title":906,"description":907,"authors":912,"heroImage":908,"date":914,"body":915,"category":808,"tags":916},[913],"Darren Eastman","2023-03-27","\n\nIn GitLab 16.0, on GitLab SaaS Runners on Linux, we are removing the `CI_PRE_CLONE_SCRIPT` variable support in CI/CD workflows. 
If you use the `CI_PRE_CLONE_SCRIPT` variable in your GitLab SaaS CI pipelines, you must change to the new method to ensure your workflows run as expected.\n\n## What is the pre_clone_script?\n\nThe [`pre_clone_script`](https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-section) configuration option is a powerful pre-build script feature that enables you to execute custom logic before a GitLab Runner clones the project repository and runs your CI jobs. For example, you could use this feature in your environment to automate the cleanup of files from the build directory that aren’t useful for subsequent builds. Other use cases include retrieving files needed for the build or running other commands before the git initialization of the build directory.\n\nTo use this feature on GitLab SaaS Runners on Linux, you must first define a project CI/CD variable, `CI_PRE_CLONE_SCRIPT`, and include that variable in the `.gitlab-ci.yml` pipeline file.\n\nWhile this Runner pre-build script hook configuration has proven helpful for our customers, we needed to devise a more straightforward solution, while introducing additional guard rails. Enter the new [`pre_get_sources_script`](https://docs.gitlab.com/ee/ci/yaml/index.html#hookspre_get_sources_script) keyword in the `.gilab-ci.yml` file syntax.\n\n## What is the pre_get_sources_script hook?\n\nThe `pre_get_sources_script` hook is a simple-to-use method that enables you to have your script executed by the GitLab Runner before the git clone, init, and CI build scripts. 
Using the new `pre_get_sources_script` script is as simple as entering the following syntax in your `.gitlab-ci.yml` pipeline file.\n\n``` yaml\ntest_job:\n   stage: test\n   hooks:\n      pre_get_sources_script:\n      - echo 'hello run commands here before fetching the project repository'\n   script:\n     - echo 'this is the start of my CI build job script\n\n```\n\nSince the hook now is visible as code in your pipeline, you have immediate visibility into the script the Runner will execute before running the build job.\n\n## How to prepare for `pre_get_sources_script`?\n\nTo prepare for the change to `pre_get_sources_script` in GitLab 16.0, follow these steps: \n\n1. Check your CI jobs on GitLab SaaS to confirm if the `CI_PRE_CLONE_SCRIPT` variable is used.\n1. If the `CI_PRE_CLONE_SCRIPT` is used, then replace the script definition with a `pre_get_sources_script` hook in your `.gitlab-ci.yml` file.\n1. If you have any issues during testing of your pipelines with `pre_get_sources_script`, connect with us by leaving a comment below.\n\n## What's next: Support for `post_get_source`\n\nOn self-managed GitLab Runners, the `pre_get_sources_script` hook is only one of many hooks you can use to run code in various CI/CD pipeline stages. Those hooks include `post_get_sources`, `pre_build`, and `post_build` hooks, configurable only on the Runner host. More details are available in the [`[[runners]]`](https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-section) section in the advanced configuration documentation.\n\nIn the future, we plan to add support for `post_get_sources` in the YAML syntax of the `gitlab-ci.yml` pipeline.\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. 
As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n\n",[721,9,917],"features",{"slug":919,"featured":6,"template":701},"changes-to-the-preclonescript","content:en-us:blog:changes-to-the-preclonescript.yml","Changes To The Preclonescript","en-us/blog/changes-to-the-preclonescript.yml","en-us/blog/changes-to-the-preclonescript",{"_path":925,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":926,"content":932,"config":939,"_id":941,"_type":14,"title":942,"_source":16,"_file":943,"_stem":944,"_extension":19},"/en-us/blog/ci-deployment-and-environments",{"title":927,"description":928,"ogTitle":927,"ogDescription":928,"noIndex":6,"ogImage":929,"ogUrl":930,"ogSiteName":686,"ogType":687,"canonicalUrls":930,"schema":931},"How to use GitLab CI to deploy to multiple environments","We walk you through different scenarios to demonstrate the versatility and power of GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662033/Blog/Hero%20Images/intro.jpg","https://about.gitlab.com/blog/ci-deployment-and-environments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab CI to deploy to multiple environments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ivan Nemytchenko\"},{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-02-05\",\n      }",{"title":927,"description":928,"authors":933,"heroImage":929,"date":935,"body":936,"category":741,"tags":937,"updatedDate":938},[934,873],"Ivan Nemytchenko","2021-02-05","This post is a success story of one imaginary news portal, and you're the\nhappy\n\nowner, the editor, and the only developer. 
Luckily, you already host your\nproject\n\ncode on GitLab.com and know that you can\n\n[run tests with GitLab CI/CD](https://docs.gitlab.com/ee/ci/testing/).\n\nNow you’re curious if it can be [used for\ndeployment](/blog/how-to-keep-up-with-ci-cd-best-practices/), and how far\ncan you go with it.\n\n\nTo keep our story technology stack-agnostic, let's assume that the app is\njust a\n\nset of HTML files. No server-side code, no fancy JS assets compilation.\n\n\nDestination platform is also simplistic – we will use [Amazon\nS3](https://aws.amazon.com/s3/).\n\n\nThe goal of the article is not to give you a bunch of copy-pasteable\nsnippets.\n\nThe goal is to show the principles and features of [GitLab\nCI](/solutions/continuous-integration/) so that you can easily apply them to\nyour technology stack.\n\n{: .alert .alert-warning}\n\n\nLet’s start from the beginning. There's no continuous integration (CI) in\nour story yet.\n\n\n## At the starting line\n\n\n**Deployment**: In your case, it means that a bunch of HTML files should\nappear on your\n\nS3 bucket (which is already configured for\n\n[static website\nhosting](http://docs.aws.amazon.com/AmazonS3/latest/dev/HowDoIWebsiteConfiguration.html?shortFooter=true)).\n\n\nThere are a million ways to do it. 
We’ll use the\n\n[awscli](http://docs.aws.amazon.com/cli/latest/reference/s3/cp.html#examples)\nlibrary,\n\nprovided by Amazon.\n\n\nThe full command looks like this:\n\n\n```shell\n\naws s3 cp ./ s3://yourbucket/ --recursive --exclude \"*\" --include \"*.html\"\n\n```\n\n\n![Manual\ndeployment](https://about.gitlab.com/images/blogimages/ci-deployment-and-environments/13.jpg){:\n.center}\n\nPushing code to repository and deploying are separate processes.\n\n{: .note .text-center}\n\n\nImportant detail: The command\n\n[expects\nyou](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#config-settings-and-precedence)\n\nto provide `AWS_ACCESS_KEY_ID` and  `AWS_SECRET_ACCESS_KEY` environment\n\nvariables. Also you might need to specify `AWS_DEFAULT_REGION`.\n\n{: .alert .alert-info}\n\n\nLet’s try to automate it using [GitLab\nCI](/solutions/continuous-integration/).\n\n\n## The first automated deployment\n\n\nWith GitLab, there's no difference on what commands to run.\n\nYou can set up GitLab CI in a way that tailors to your specific needs, as if\nit was your local terminal on your computer. 
As long as you execute commands\nthere, you can tell CI to do the same for you in GitLab.\n\nPut your script to `.gitlab-ci.yml` and push your code – that’s it: CI\ntriggers\n\na _job_ and your commands are executed.\n\n\nNow, let's add some context to our story: Our website is small, there is\n20-30 daily\n\nvisitors and the code repository has only one default branch: `main`.\n\n\nLet's start by specifying a _job_ with the command from above in the\n`.gitlab-ci.yml` file:\n\n\n```yaml\n\ndeploy:\n  script: aws s3 cp ./ s3://yourbucket/ --recursive --exclude \"*\" --include \"*.html\"\n```\n\n\nNo luck:\n\n![Failed\ncommand](https://about.gitlab.com/images/blogimages/ci-deployment-and-environments/fail1.png){:\n.shadow}\n\n\nIt is our _job_ to ensure that there is an `aws` executable.\n\nTo install `awscli` we need `pip`, which is a tool for Python packages\ninstallation.\n\nLet's specify Docker image with preinstalled Python, which should contain\n`pip` as well:\n\n\n```yaml\n\ndeploy:\n  image: python:latest\n  script:\n  - pip install awscli\n  - aws s3 cp ./ s3://yourbucket/ --recursive --exclude \"*\" --include \"*.html\"\n```\n\n\n![Automated\ndeployment](https://about.gitlab.com/images/blogimages/ci-deployment-and-environments/14.jpg){:\n.center}\n\nYou push your code to GitLab, and it is automatically deployed by CI.\n  {: .note .text-center}\n\nThe installation of `awscli` extends the job execution time, but that is not\na big\n\ndeal for now. 
If you need to speed up the process, you can always [look for\n\na Docker image](https://hub.docker.com/explore/) with preinstalled `awscli`,\n\nor create an image by yourself.\n\n{: .alert .alert-warning}\n\n\nAlso, let’s not forget about these environment variables, which you've just\ngrabbed\n\nfrom [AWS Console](https://console.aws.amazon.com/):\n\n\n```yaml\n\nvariables:\n  AWS_ACCESS_KEY_ID: \"AKIAIOSFODNN7EXAMPLE\"\n  AWS_SECRET_ACCESS_KEY: \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\"\ndeploy:\n  image: python:latest\n  script:\n  - pip install awscli\n  - aws s3 cp ./ s3://yourbucket/ --recursive --exclude \"*\" --include \"*.html\"\n```\n\nIt should work, but keeping secret keys open, even in a private repository,\n\nis not a good idea. Let's see how to deal with this situation.\n\n\n### Keeping secret things secret\n\n\nGitLab has a special place for secret variables: **Settings > CI/CD >\nVariables**\n\n\n![Picture of Variables\npage](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674076/Blog/Content%20Images/add-variable-updated.png)\n\n\nWhatever you put there will be turned into **environment variables**.\n\nChecking the \"Mask variable\" checkbox will obfuscate the variable in job\nlogs. Also, checking the \"Protect variable\" checkbox will export the\nvariable to only pipelines running on protected branches and tags. Users\nwith Owner or Maintainer permissions to a project will have access to this\nsection.\n\n\nWe could remove `variables` section from our CI configuration. However,\nlet’s use it for another purpose.\n\n\n### How to specify and use variables that are not secret\n\n\nWhen your configuration gets bigger, it is convenient to keep some of the\n\nparameters as variables at the beginning of your configuration. Especially\nif you\n\nuse them in more than one place. 
Although it is not the case in our\nsituation yet,\n\nlet's set the S3 bucket name as a\n[**variable**](https://docs.gitlab.com/ee/ci/variables/) for the purpose of\nthis demonstration:\n\n\n```yaml\n\nvariables:\n  S3_BUCKET_NAME: \"yourbucket\"\ndeploy:\n  image: python:latest\n  script:\n  - pip install awscli\n  - aws s3 cp ./ s3://$S3_BUCKET_NAME/ --recursive --exclude \"*\" --include \"*.html\"\n```\n\n\nSo far so good:\n\n\n![Successful\nbuild](https://about.gitlab.com/images/blogimages/ci-deployment-and-environments/build.png){:\n.shadow.medium.center}\n\n\nIn our hypothetical scenario, the audience of your website has grown, so\nyou've hired a developer to help you.\n\nNow you have a team. Let's see how teamwork changes the GitLab CI workflow.\n\n\n## How to use GitLab CI with a team\n\n\nNow, that there are two users working in the same repository, it is no\nlonger convenient\n\nto use the `main` branch for development. You decide to use separate\nbranches\n\nfor both new features and new articles and merge them into `main` when they\nare ready.\n\n\nThe problem is that your current CI config doesn’t care about branches at\nall.\n\nWhenever you push anything to GitLab, it will be deployed to S3.\n\n\nPreventing this problem is straightforward. Just add `only: main` to your\n`deploy` job.\n\n\n![Automated deployment of main\nbranch](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674076/Blog/Content%20Images/15-updated.png){:\n.center}\n\nYou don't want to deploy every branch to the production website but it would\nalso be nice to preview your changes from feature-branches somehow.\n\n{: .note .text-center}\n\n\n### How to set up a separate place for testing code\n\n\nThe person you recently hired, let's call him Patrick, reminds you that\nthere is a feature called\n\n[GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/). 
It looks like a perfect\ncandidate for\n\na place to preview your work in progress.\n\n\nTo [host websites on GitLab Pages](/blog/gitlab-pages-setup/) your CI\nconfiguration file should satisfy three simple rules:\n\n\n- The _job_ should be named `pages`\n\n- There should be an `artifacts` section with folder `public` in it\n\n- Everything you want to host should be in this `public` folder\n\n\nThe contents of the public folder will be hosted at\n`http://\u003Cusername>.gitlab.io/\u003Cprojectname>/`\n\n{: .alert .alert-info}\n\n\nAfter applying the [example config for plain-html\nwebsites](https://gitlab.com/pages/plain-html/blob/master/.gitlab-ci.yml),\n\nthe full CI configuration looks like this:\n\n\n```yaml\n\nvariables:\n  S3_BUCKET_NAME: \"yourbucket\"\n\ndeploy:\n  image: python:latest\n  script:\n  - pip install awscli\n  - aws s3 cp ./ s3://$S3_BUCKET_NAME/ --recursive --exclude \"*\" --include \"*.html\"\n  only:\n  - main\n\npages:\n  image: alpine:latest\n  script:\n  - mkdir -p ./public\n  - cp ./*.html ./public/\n  artifacts:\n    paths:\n    - public\n  except:\n  - main\n```\n\n\nWe specified two jobs. 
One job deploys the website for your customers to S3\n(`deploy`).\n\nThe other one (`pages`) deploys the website to GitLab Pages.\n\nWe can name them \"Production environment\" and \"Staging environment\",\nrespectively.\n\n\n![Deployment to two\nplaces](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674076/Blog/Content%20Images/16-updated.png){:\n.center}\n\nAll branches, except main, will be deployed to GitLab Pages.\n\n{: .note .text-center}\n\n\n## Introducing environments\n\n\nGitLab offers\n [support for environments](https://docs.gitlab.com/ee/ci/environments/) (including dynamic environments and static environments),\n and all you need to do is to specify the corresponding environment for each deployment *job*:\n\n```yaml\n\nvariables:\n  S3_BUCKET_NAME: \"yourbucket\"\n\ndeploy to production:\n  environment: production\n  image: python:latest\n  script:\n  - pip install awscli\n  - aws s3 cp ./ s3://$S3_BUCKET_NAME/ --recursive --exclude \"*\" --include \"*.html\"\n  only:\n  - main\n\npages:\n  image: alpine:latest\n  environment: staging\n  script:\n  - mkdir -p ./public\n  - cp ./*.html ./public/\n  artifacts:\n    paths:\n    - public\n  except:\n  - main\n```\n\n\nGitLab keeps track of your deployments, so you always know what is currently\nbeing deployed on your servers:\n\n\n![List of\nenvironments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674076/Blog/Content%20Images/envs-updated.png){:\n.shadow.center}\n\n\nGitLab provides full history of your deployments for each of your current\nenvironments:\n\n\n![List of deployments to staging\nenvironment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674077/Blog/Content%20Images/staging-env-detail-updated.png){:\n.shadow.center}\n\n\n![Environments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674077/Blog/Content%20Images/17-updated.png){:\n.center}\n\n\nNow, with everything automated and set up, we’re ready for the new\nchallenges that 
are just around the corner.\n\n\n## How to troubleshoot deployments\n\n\nIt has just happened again.\n\nYou've pushed your feature-branch to preview it on staging and a minute\nlater Patrick pushed\n\nhis branch, so the staging environment was rewritten with his work. Aargh!!\nIt was the third time today!\n\n\nIdea! \u003Ci class=\"far fa-lightbulb\" style=\"color:#FFD900; font-size:.85em\"\naria-hidden=\"true\">\u003C/i> Let's use Slack to notify us of deployments, so that\npeople will not push their stuff if another one has been just deployed!\n\n\n> Learn how to [integrate GitLab with\nSlack](https://docs.gitlab.com/ee/user/project/integrations/gitlab_slack_application.html).\n\n\n## Teamwork at scale\n\n\nAs the time passed, your website became really popular, and your team has\ngrown from two people to eight people.\n\nPeople develop in parallel, so the situation when people wait for each other\nto\n\npreview something on Staging has become pretty common. \"Deploy every branch\nto staging\" stopped working.\n\n\n![Queue of branches for review on\nStaging](https://about.gitlab.com/images/blogimages/ci-deployment-and-environments/queue.jpg){:\n.center}\n\n\nIt's time to modify the process one more time. 
You and your team agreed that\nif\n\nsomeone wants to see their changes on the staging\n\nserver, they should first merge the changes to the \"staging\" branch.\n\n\nThe change of `.gitlab-ci.yml` is minimal:\n\n\n```yaml\n\nexcept:\n\n- main\n\n```\n\n\nis now changed to\n\n\n```yaml\n\nonly:\n\n- staging\n\n```\n\n\n![Staging\nbranch](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674077/Blog/Content%20Images/18-updated.png){:\n.center}\n\nPeople have to merge their feature branches before preview on the staging\nserver.\n\n{: .note .text-center}\n\n\nOf course, it requires additional time and effort for merging, but everybody\nagreed that it is better than waiting.\n\n\n### How to handle emergencies\n\n\nYou can't control everything, so sometimes things go wrong. Someone merged\nbranches incorrectly and\n\npushed the result straight to production exactly when your site was on top\nof HackerNews.\n\nThousands of people saw your completely broken layout instead of your shiny\nmain page.\n\n\nLuckily, someone found the **Rollback** button, so the\n\nwebsite was fixed a minute after the problem was discovered.\n\n\n![List of\nenvironments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674077/Blog/Content%20Images/prod-env-rollback-arrow-updated.png){:\n.shadow.center}\n\nRollback relaunches the previous job with the previous commit\n\n{: .note .text-center}\n\n\nAnyway, you felt that you needed to react to the problem and decided to turn\noff\n\nauto-deployment to Production and switch to manual deployment.\n\nTo do that, you needed to add `when: manual` to your _job_.\n\n\nAs you expected, there will be no automatic deployment to Production after\nthat.\n\nTo deploy manually go to **CI/CD > Pipelines**, and click the button:\n\n\n![Skipped job is available for manual\nlaunch](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674076/Blog/Content%20Images/manual-pipeline-arrow-updated.png){:\n.shadow.center}\n\n\nFast 
forward in time. Finally, your company has turned into a corporation.\nNow, you have hundreds of people working on the website,\n\nso all the previous compromises no longer work.\n\n\n### Time to start using Review Apps\n\n\nThe next logical step is to boot up a temporary instance of the application\nper feature branch for review.\n\n\nIn our case, we set up another bucket on S3 for that. The only difference is\nthat\n\nwe copy the contents of our website to a \"folder\" with the name of the\n\ndevelopment branch, so that the URL looks like this:\n\n\n`http://\u003CREVIEW_S3_BUCKET_NAME>.s3-website-us-east-1.amazonaws.com/\u003Cbranchname>/`\n\n\nHere's the replacement for the `pages` _job_ we used before:\n\n\n```yaml\n\nreview apps:\n  variables:\n    S3_BUCKET_NAME: \"reviewbucket\"\n  image: python:latest\n  environment: review\n  script:\n  - pip install awscli\n  - mkdir -p ./$CI_BUILD_REF_NAME\n  - cp ./*.html ./$CI_BUILD_REF_NAME/\n  - aws s3 cp ./ s3://$S3_BUCKET_NAME/ --recursive --exclude \"*\" --include \"*.html\"\n```\n\n\nThe interesting thing is where we got this `$CI_BUILD_REF_NAME` variable\nfrom.\n\nGitLab predefines [many environment\nvariables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html)\nso that you can use them in your jobs.\n\n\nNote that we defined the `S3_BUCKET_NAME` variable inside the *job*. 
You can\ndo this to rewrite top-level definitions.\n\n{: .alert .alert-info}\n\n\nVisual representation of this configuration:\n\n![How to use GitLab CI - update - 19 -\nupdated](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674077/Blog/Content%20Images/19-updated.png){:\n.illustration}\n\n\nThe details of the Review Apps implementation vary widely, depending upon\nyour real technology\n\nstack and on your deployment process, which is outside the scope of this\nblog post.\n\n\nIt will not be that straightforward, as it is with our static HTML website.\n\nFor example, you had to make these instances temporary, and booting up these\ninstances\n\nwith all required software and services automatically on the fly is not a\ntrivial task.\n\nHowever, it is doable, especially if you use Docker containers, or at least\nChef or Ansible.\n\n\nWe'll cover deployment with Docker in a future blog post.\n\nI feel a bit guilty for simplifying the deployment process to a simple HTML\nfiles copying, and not\n\nadding some hardcore scenarios. If you need some right now, I recommend you\nread the article [\"Building an Elixir Release into a Docker image using\nGitLab\nCI.\"](/blog/building-an-elixir-release-into-docker-image-using-gitlab-ci-part-1/)\n\n\nFor now, let's talk about one final thing.\n\n\n### Deploying to different platforms\n\n\nIn real life, we are not limited to S3 and GitLab Pages. 
We host, and\ntherefore,\n\ndeploy our apps and packages to various services.\n\n\nMoreover, at some point, you could decide to move to a new platform and will\nneed to rewrite all your deployment scripts.\n\nYou can use a gem called `dpl` to minimize the damage.\n\n\nIn the examples above we used `awscli` as a tool to deliver code to an\nexample\n\nservice (Amazon S3).\n\nHowever, no matter what tool and what destination system you use, the\nprinciple is the same:\n\nYou run a command with some parameters and somehow pass a secret key for\nauthentication purposes.\n\n\nThe `dpl` deployment tool utilizes this principle and provides a\n\nunified interface for [this list of\nproviders](https://github.com/travis-ci/dpl#supported-providers).\n\n\nHere's how a production deployment _job_ would look if we use `dpl`:\n\n\n```yaml\n\nvariables:\n  S3_BUCKET_NAME: \"yourbucket\"\n\ndeploy to production:\n  environment: production\n  image: ruby:latest\n  script:\n  - gem install dpl\n  - dpl --provider=s3 --bucket=$S3_BUCKET_NAME\n  only:\n  - main\n```\n\n\nIf you deploy to different systems or change destination platform\nfrequently, consider\n\nusing `dpl` to make your deployment scripts look uniform.\n\n\n## Five key takeaways\n\n\n1. Deployment is just a command (or a set of commands) that is regularly\nexecuted. Therefore it can run inside GitLab CI.\n\n2. Most times you'll need to provide some secret key(s) to the command you\nexecute. Store these secret keys in **Settings > CI/CD > Variables**.\n\n3. With GitLab CI, you can flexibly specify which branches to deploy to.\n\n4. If you deploy to multiple environments, GitLab will conserve the history\nof deployments,\n\nwhich allows you to rollback to any previous version.\n\n5. 
For critical parts of your infrastructure, you can enable manual\ndeployment from GitLab interface, instead of automated deployment.\n\n\n\u003Cstyle>\n\nimg.illustration {\n  padding-left: 12%;\n  padding-right: 12%;\n\n}\n\n@media (max-width: 760px) {\n  img.illustration {\n    padding-left: 0px;\n    padding-right: 0px;\n  }\n}\n\n\u003C/style>\n",[721,9,766],"2024-07-22",{"slug":940,"featured":6,"template":701},"ci-deployment-and-environments","content:en-us:blog:ci-deployment-and-environments.yml","Ci Deployment And Environments","en-us/blog/ci-deployment-and-environments.yml","en-us/blog/ci-deployment-and-environments",{"_path":946,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":947,"content":953,"config":960,"_id":962,"_type":14,"title":963,"_source":16,"_file":964,"_stem":965,"_extension":19},"/en-us/blog/cicd-tunnel-impersonation",{"title":948,"description":949,"ogTitle":948,"ogDescription":949,"noIndex":6,"ogImage":950,"ogUrl":951,"ogSiteName":686,"ogType":687,"canonicalUrls":951,"schema":952},"Fine-grained permissions with impersonation in CI/CD tunnel","Learn how to use fine-grained permissions via generic impersonation in CI/CD Tunnel","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667435/Blog/Hero%20Images/tunnel.jpg","https://about.gitlab.com/blog/cicd-tunnel-impersonation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use fine-grained permissions via generic impersonation in CI/CD Tunnel\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2022-02-01\",\n      }",{"title":954,"description":949,"authors":955,"heroImage":950,"date":956,"body":957,"category":741,"tags":958},"How to use fine-grained permissions via generic impersonation in CI/CD Tunnel",[873],"2022-02-01","\nThe [CI/CD Tunnel](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html), which leverages the 
[GitLab Agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/), enables users to access Kubernetes clusters from GitLab CI/CD jobs. In this blog post, we review how you can securely access your clusters from your CI/CD pipelines by using generic impersonation. In addition, we will briefly cover the activity list of the GitLab Agent for Kubernetes, a capability recently introduced by GitLab, that can help you detect and troubleshoot faulty events.\n\n## Using impersonation with your CI/CD tunnel\n\nThe CI/CD Tunnel leverages the GitLab Agent for Kubernetes, which permits the secure connectivity between GitLab and your Kubernetes cluster without the need to expose your cluster to the internet and outside your firewall. The CI/CD Tunnel allows you to connect to your Kubernetes cluster from your CI/CD jobs/pipelines.\n\nBy default, the CI/CD Tunnel inherits all the permissions from the service account used to install the Agent in the cluster. However, fine-grained permissions can be used in conjunction with the CI/CD Tunnel to restrict and manage access to your cluster resources.\n\nFine-grained permissions control with the CI/CD tunnel via impersonation:\n\n- Allows you to leverage your K8s authorization capabilities to limit the permissions of what can be done with the CI/CD tunnel on your running cluster\n\n- Lowers the risk of providing unlimited access to your K8s cluster with the CI/CD tunnel\n\n- Segments fine-grained permissions with the CI/CD tunnel at the project or group level\n\n- Controls permissions with the CI/CD tunnel at the username or service account\n\nTo restrict access to your cluster, you can use impersonation. 
To specify impersonations, use the access_as attribute in your Agent's configuration file and use Kubernetes RBAC rules to manage impersonated account permissions.\n\nYou can impersonate:\n- The Agent itself (default)\n- The CI job that accesses the cluster\n- A specific user or system account defined within the cluster\n\n## Steps to exercise impersonation with the CI/CD Tunnel\n\nLet's go through the steps on how you can exercise impersonation with the CI/CD Tunnel.\n\n### Creating your Kubernetes cluster\n\nIn order to exercise the capabilities described above, we need a Kubernetes cluster. Although you can use any Kubernetes distribution, for this example, we create a GKE Standard Kubernetes cluster and name it \"csaavedra-ga4k-cluster\". We select the zone and version 1.21 of Kubernetes and ensure that our cluster will have three nodes. We leave the security and metadata screens with their defaulted values and click on the create button:\n\n![Creating a GKE cluster](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/0-gke-creation.png){: .shadow.medium.center.wrap-text}\nCreating a GKE cluster\n{: .note.text-center}\n\n### Sample projects to be used\n\nLet's proceed now to this [top-level group](https://gitlab.com/tech-marketing/sandbox/gl-14-5-cs-demos), which contains three projects, which we will use to show impersonation with the CI/CD tunnel. You can do this at the project or group level. In this example, we will show setting impersonation at the project level:\n\n![Project structure in GitLab](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/1-project-struct.png){: .shadow.medium.center.wrap-text}\nProject structure in GitLab\n{: .note.text-center}\n\nProject \"ga4k\" will configure the GitLab Agent for Kubernetes and also set impersonations with the CI/CD tunnel. 
Project \"sample-application\" will use the CI/CD tunnel, managed by the agent, to connect to the Kubernetes cluster and execute a pipeline using different impersonations. Project \"cluster-management\" will also use the CI/CD tunnel to connect to the cluster and install the Ingress application on it.\n\nNot only does the CI/CD tunnel streamline the deployment, management, and monitoring of Kubernetes-native applications, but it also does it securely and safely by using impersonations that leverage your Kubernetes cluster's RBAC rules.\n\nProject \"ga4k\" contains and manages the configuration for the GitLab Agent for K8s called \"csaavedra-agentk\". Looking at its \"config.yaml\" file, we see that the agent points to itself for manifest projects, but most importantly, it provides CI/CD tunnel access to two projects: \"sample-application\" and \"cluster-management\". This means that these two projects' CI/CD pipelines will have access to the K8s cluster that the agent is securely connected to:\n\n![The GitLab Agent for K8s configuration](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/2-agent-config.png){: .shadow.medium.center.wrap-text}\nThe GitLab Agent for K8s configuration\n{: .note.text-center}\n\nProject \"sample-application\" has a pipeline, which we will later execute under different impersonations. And project \"cluster-management\" has a pipeline that will install only the Ingress application on the Kubernetes cluster, as configured in its helmfile.yaml file:\n\n![Deployable applications in cluster-management project](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/3-cluster-mgmt-helmfile.png){: .shadow.medium.center.wrap-text}\nDeployable applications in cluster-management project\n{: .note.text-center}\n\n### Connecting the Agent to your Kubernetes cluster\n\nLet's head back to project \"ga4k\" and connect to the Kubernetes cluster via the agent. 
We select agent \"csaavedra-agentk\" to register with GitLab:\n\n![List of defined agents](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/4-agents-popdown.png){: .shadow.medium.center.wrap-text}\nList of defined agents\n{: .note.text-center}\n\nThis step generates a token that we can use to install the agent on the cluster. We copy the Docker command to our local desktop for later use. Notice that the command includes the generated token, which you can also copy:\n\n![Docker command to deploy agent to your K8s cluster](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/5-docker-cmd.png){: .shadow.medium.center.wrap-text}\nDocker command to deploy agent to your K8s cluster\n{: .note.text-center}\n\nFrom a local command window, we ensure that our connectivity parameters to GCP are correct:\n\n![Checking your GCP connectivity parameters](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/6-gcp-connectivity.png){: .shadow.medium.center.wrap-text}\nChecking your GCP connectivity parameters\n{: .note.text-center}\n\nWe then add the credentials to our kubeconfig file to connect to our newly created Kubernetes cluster \"csaavedra-ga4k-cluster\" and verify that our context is set to it:\n\n![Adding your cluster credentials to your kubeconfig](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/7-adding-creds.png){: .shadow.medium.center.wrap-text}\nAdding the credentials of your cluster to your kubeconfig\n{: .note.text-center}\n\nOnce this is done, we can list all the pods that are up and running on the cluster by entering `kubectl get pods --all-namespaces`:\n\n![Listing the pods in your running cluster](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/8-listing-pods.png){: .shadow.medium.center.wrap-text}\nListing the pods in your running cluster\n{: .note.text-center}\n\nFinally, we paste the docker command that will install the GitLab Agent for Kubernetes to this cluster making sure 
that its namespace is \"ga4k-agent\":\n\n![Deploying the agent to your K8s cluster](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/9-pasted-docker-cmd.png){: .shadow.medium.center.wrap-text}\nDeploying the agent to your K8s cluster\n{: .note.text-center}\n\nWe list the pods one more time to check that the agent pod is up and running on the cluster:\n\n![Agent up and running on your K8s cluster](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/10-agent-up.png){: .shadow.medium.center.wrap-text}\nAgent up and running on your K8s cluster\n{: .note.text-center}\n\nThe screen will refresh and show our Kubernetes cluster connected via the agent:\n\n![Agent connected to your K8s cluster](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/11-agent-connected.png){: .shadow.large.center.wrap-text}\nAgent connected to your K8s cluster\n{: .note.text-center}\n\n### The Agent's Activity Information page\n\nClicking on the agent name takes us to the Agent's Activity Information page, which lists agent events in real time. This information can help monitor your cluster's activity and detect and troubleshoot faulty events from your cluster. Connection and token information is currently listed with more events coming in future releases:\n\n![Agent activity information page](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/12-agent-activity.png){: .shadow.small.center.wrap-text}\nAgent activity information page\n{: .note.text-center}\n\n### Deploying Ingress to your Kubernetes cluster using default impersonation\n\nBy default, the CI/CD Tunnel inherits all the permissions from the service account used to install the agent in the cluster. Per the agent's configuration, the CI/CD pipelines of the \"cluster-management\" project will have access to the K8s cluster that the agent is securely connected to. 
Let's leverage this connectivity to deploy the Ingress application to the Kubernetes cluster from project \"cluster-management\". Let's make a small update to the project pipeline to launch it. Once the pipeline launches, we navigate to its detail view to track its completion:\n\n![Project \"cluster-management\" pipeline completed](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/13-cluster-mgmt-pipeline.png){: .shadow.small.center.wrap-text}\nProject \"cluster-management\" pipeline completed\n{: .note.text-center}\n\nand check the log of its **apply** job to verify that it was able to switch to the agent's context and successfully ran all the installation steps:\n\n![Ingress deployed to your cluster via CI/CD Tunnel using default impersonation](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/14-apply-job-log.png){: .shadow.medium.center.wrap-text}\nIngress deployed to your cluster via CI/CD Tunnel using default impersonation\n{: .note.text-center}\n\nFor further verification, we list the pods in the cluster and check that the ingress pods are up and running:\n\n![Ingress pods up and running](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/15-ingress-pods-up.png){: .shadow.medium.center.wrap-text}\nIngress pods up and running on your cluster\n{: .note.text-center}\n\n### Start trailing the agent's log file to watch updates\n\nBefore we start the impersonation use cases, let's start trailing the agent's log file from a command window:\n\n![Trailing agent log from the command line](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/16-trail-agent-log.png){: .shadow.medium.center.wrap-text}\nTrailing agent log from the command line\n{: .note.text-center}\n\nAnd also let's increase its logging to debug:\n\n![Increasing the agent log level to debug](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/17-agent-logging-level.png){: .shadow.medium.center.wrap-text}\nIncreasing the agent 
log level to debug\n{: .note.text-center}\n\n### Running impersonation using access_as:ci_job\n\nLet's now impersonate the CI job that accesses the cluster. For this, we modify the agent's configuration and add the \"access_as\" attribute with the \"ci_job\" tag under it:\n\n![Impersonating the CI job](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/18-ci-job-impersonation.png){: .shadow.medium.center.wrap-text}\nImpersonating the CI job\n{: .note.text-center}\n\nAs we save the updated configuration, we verify in the log output that the update has taken place in the running agent:\n\n![Agent updated with CI job impersonation](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/19-agent-conf-updated.png){: .shadow.large.center.wrap-text}\nAgent updated with CI job impersonation\n{: .note.text-center}\n\nNotice that the pipeline of the \"sample-application\" project has a test stage and a test job. It sets the variable KUBE_CONTEXT first, loads an image with the version of kubectl that matches the version of the K8s cluster, and executes two kubectl commands that access the remote cluster via the agent:\n\n![Project \"sample-application\" pipeline](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/20-sample-application-pipeline.png){: .shadow.medium.center.wrap-text}\nProject \"sample-application\" pipeline\n{: .note.text-center}\n\nWe manually execute the pipeline of the \"sample-application\" project and verify in the job log output that the context switch was successful and that the kubectl commands executed correctly:\n\n![Job log output with CI impersonation](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/21-ci-impersonation-job-log.png){: .shadow.medium.center.wrap-text}\nJob log output with CI impersonation\n{: .note.text-center}\n\n### Running impersonation using access_as:impersonate:username\n\nThe last use case is the impersonation of a specific user or system account defined within the 
cluster. I have pre-created a service account called \"jane\" on the Kubernetes cluster under the \"default\" namespace. And \"jane\" has been given the permission to do a \"get\", \"list\", and \"watch\" on the cluster pods as you can see by the output in the command window:\n\n![Jane user with permission to list pods](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/22-jane-and-perms.png){: .shadow.medium.center.wrap-text}\nJane user with permission to list pods\n{: .note.text-center}\n\nRemember that the service account \"gitlab-agent\" under namespace \"ga4k-agent\" was created earlier when we installed the agent by running the Docker command. In order for the agent to be able to impersonate another service account or user, it needs to have the permissions to do so. We do this by creating a clusterrole \"impersonate\" for impersonating users, groups, and service accounts, and then create a clusterrolebinding \"allowimpersonator\" to give these permissions for the \"default\" namespace to the agent \"gitlab-agent\" in the \"ga4k-agent\" namespace:\n\n![Giving impersonation permission to agent](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/23-clusterrole-perm-to-agent.png){: .shadow.large.center.wrap-text}\nGiving impersonation permission to agent\n{: .note.text-center}\n\nWe then edit the agent's configuration and add the \"impersonate\" attribute and provide the service account for \"jane\" as the parameter for the \"username\" tag:\n\n![Impersonating a specific user](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/24-user-impersonation.png){: .shadow.medium.center.wrap-text}\nImpersonating a specific user called jane\n{: .note.text-center}\n\nAs we commit the changes, we check the log output to verify that the update has taken place in the running agent:\n\n![Agent updated with user impersonation](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/25-agent-conf-updated.png){: 
.shadow.large.center.wrap-text}\nAgent updated with user impersonation\n{: .note.text-center}\n\nSince we know that \"jane\" has the permission to list the running pods in the cluster, let's head to the project \"sample-application\" pipeline and add the command \"kubectl get pods --all-namespaces\" to it:\n\n![Adding get pods command that jane is allowed to run](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/26-adding-get-pods-cmd.png){: .shadow.medium.center.wrap-text}\nAdding get pods command that jane is allowed to run\n{: .note.text-center}\n\nWe commit the update and head over to the running pipeline and drill into the \"test\" job log output to see that the context switch was successful and that the kubectl commands executed correctly, including the listing of the running pods in the cluster:\n\n![Job output for pipeline impersonation jane](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/27-user-impersonation-job-log.png){: .shadow.medium.center.wrap-text}\nJob output for pipeline impersonation jane\n{: .note.text-center}\n\n## Conclusion\n\nIn this blog post, we reviewed how you can securely access your Kubernetes clusters from your CI/CD pipelines by using generic impersonation.  
In addition, we showed the activity list of the GitLab Agent for Kubernetes, which can help you detect and troubleshoot faulty events from your cluster.\n\nTo see these capabilities in action, check out the following video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/j8SJuHd7Zsw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by Jakob Søby on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[810,721,9,959],"kubernetes",{"slug":961,"featured":6,"template":701},"cicd-tunnel-impersonation","content:en-us:blog:cicd-tunnel-impersonation.yml","Cicd Tunnel Impersonation","en-us/blog/cicd-tunnel-impersonation.yml","en-us/blog/cicd-tunnel-impersonation",{"_path":967,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":968,"content":974,"config":981,"_id":983,"_type":14,"title":984,"_source":16,"_file":985,"_stem":986,"_extension":19},"/en-us/blog/demystifying-ci-cd-variables",{"title":969,"description":970,"ogTitle":969,"ogDescription":970,"noIndex":6,"ogImage":971,"ogUrl":972,"ogSiteName":686,"ogType":687,"canonicalUrls":972,"schema":973},"GitLab environment variables demystified","CI/CD variables are useful (and flexible) tools to control jobs and pipelines. 
We unpack everything you need to know about GitLab environment variables.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664679/Blog/Hero%20Images/blog-image-template-1800x945__24_.png","https://about.gitlab.com/blog/demystifying-ci-cd-variables","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab environment variables demystified\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        \"datePublished\": \"2021-04-09\",\n      }",{"title":969,"description":970,"authors":975,"heroImage":971,"date":977,"body":978,"category":741,"tags":979,"updatedDate":980},[976],"Veethika Mishra","2021-04-09","There is a lot of flexibility when it comes to defining and using variables\nfor [CI/CD](https://about.gitlab.com/topics/ci-cd/). Variables are extremely\nuseful for controlling jobs and pipelines, and they help you avoid\nhard-coding values in your `.gitlab-ci.yml` configuration file. The\ninformation in this post should weave a larger picture by bringing together\nall (or most) of the information around defining and handling variables,\nmaking it easier to understand the scope and capabilities. Relevant\ndocumentation is linked throughout the post.\n\n\nIn [GitLab CI/CD](https://docs.gitlab.com/ee/ci/), variables can be used to\ncustomize jobs by defining and storing values. When using variables there is\nno need to hard code values. In GitLab, CI/CD variables can be defined by\ngoing to **Settings >> CI/CD >> Variables**, or by simply defining them in\nthe `.gitlab-ci.yml` file.\n\n\nVariables are useful for configuring third-party services for different\ndeployment environments, such as `testing`, `staging`, `production`, etc.\nModify the services attached to those environments by simply changing the\nvariable that points to the API endpoint the services need to use. 
Also use\nvariables to configure jobs and then make them available as environment\nvariables within the jobs when they run.\n\n\n![GitLab reads the .gitlab-ci.yml file to scan the referenced variable and\nsends the information to the GitLab Runner. The variables are exposed on and\noutput by the\nrunner.](https://about.gitlab.com/images/blogimages/demystifying-ci-cd-variables/variables_processing.jpeg)\n\n\n## The relationship between variables and environments\n\n\nSoftware development as a process includes stages to test a product before\nrolling it out to users.\n[Environments](https://docs.gitlab.com/ee/ci/environments/) are used to\ndefine what those stages look like and it may differ between teams and\norganizations.\n\n\nOn the other hand, variables are data values that are likely to change as a\nresult of user interaction with a product. For example, their age,\npreference, or any input you could possibly think of that might determine\ntheir next step in the product task-flow.\n\n\nWe often hear the term [environment\nvariable](https://docs.gitlab.com/ee/administration/environment_variables.html).\nThese are variables that are defined in a given environment, but outside the\napplication. GitLab CI/CD variables provide developers with the ability to\nconfigure values in their code. Using variables is helpful because it\nensures that the code is flexible. GitLab CI/CD variables allow users to\nmodify an application deployed to a certain environment without making any\nchange to code. 
It is simple to run tests or even integrate third-party\nservices by changing a configuration environment variable outside the\napplication.\n\n\n## The scope of variables for CI/CD\n\n\n![Order of precedence for CI/CD variables: 1) Manual pipeline run, trigger\nand schedule pipeline variables, 2) Project level, group level, instance\nlevel protected variables, 3) Inherited CI/CD variables, 4) Job level,\nglobal yml defined variables, 5) Deployment variables, 6) Pre-defined CI/CD\nvariables](https://about.gitlab.com/images/blogimages/demystifying-ci-cd-variables/variables_precedence.jpeg)\n\n\n### `.gitlab-ci.yml` defined variables\n\n\nVariables that need to be available in the job environment can be added to\nGitLab. These CI/CD variables are meant to store non-sensitive project\nconfiguration, like the database URL in the `.gitlab-ci.yml` file. Reuse\nthis variable in multiple jobs or scripts, wherever the value is needed. If\nthe value changes, you only need to update the variable once, and the change\nis reflected everywhere the variable is used.\n\n\n### Project CI/CD variables\n\n\nMoving a step above the repository-specific requirements, you can define\nCI/CD variables in [project\nsettings](https://docs.gitlab.com/ee/ci/variables/#for-a-project), which\nmakes them available to CI/CD pipelines. These are stored out of the\nrepository (not in the `.gitlab-ci.yml` file), but are still available to\nuse in the CI/CD configuration and scripts. Storing the variables outside\nthe `.gitlab-ci.yml` file keeps these values limited to a project-only\nscope, and not saved in plain text in the project.\n\n\n### Group and instance CI/CD variables\n\n\nSome variables are relevant at the group level, or even instance level, and\ncould be useful to all projects in a group or instance. 
Define the variables\nin the [group or instance\nsettings](https://docs.gitlab.com/ee/ci/variables/#for-a-group) so all\nprojects within those scopes can use the variables without actually needing\nto know the value  or having to create the variables for the lower scope.\nFor example, a common value that needs to be updated in multiple projects\ncan be easily managed if it stays up-to-date in a single place.\nAlternatively, multiple projects could use a specific password without\nactually needing to know the value of the password itself.\n\n\n## Jobs and pipelines as environments\n\n\nGitLab CI/CD variables, besides being used as environment variables, also\nwork in the scope of the `.gitlab-ci.yml` configuration file to configure\npipeline behavior, unrelated to any environment. The variables can be stored\nin the project/group/instance settings and be made available to jobs in\npipelines.\n\n\nFor example:\n\n\n```  \n\njob:  \n  rules:  \n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH  \n  script:  \n  - echo \"This job ran on the $CI_COMMIT_BRANCH branch.\"  \n```\n\n\nThe variable `($CI_COMMIT_BRANCH)` in the script section runs in the scope\nof the job in which it was defined. This scope is the \"job environment\" –\nmeaning, when the job starts, the GitLab runner starts up a Docker container\nand runs the job in that environment. The runner will make that variable\n(and all other predefined or custom variables) available to the job, and it\ncan display their value in the log output if needed.\n\n\nBut the variable is **also** used in the `if:` section to determine when the\njob should run. That in itself is not an environment, which is why we call\nthese CI/CD variables. 
They can be used to dynamically configure your CI/CD\njobs, **as well** as be used as environment variables when the job is\nrunning.\n\n\n## Predefined variables\n\n\nA number of variables are\n[predefined](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html)\nwhen a GitLab CI/CD pipeline starts. A user can immediately access values\nfor things like commit, project, or pipeline details without needing to\ndefine the variables themselves.\n\n\n## Custom CI/CD variables\n\n\n![Runners can create two kinds of custom CI/CD variables: Type and\nFile.](https://about.gitlab.com/images/blogimages/demystifying-ci-cd-variables/variable_types.jpeg)\n\n\nWhen creating a CI/CD variable in the settings, GitLab gives the user more\nconfiguration options for the variable. Use these extra configuration\noptions for stricter control over more sensitive variables:\n\n\n**Environment scope:** If a variable only ever needs to be used in one\nspecific environment, set it to only ever be available in that environment.\nFor example, you can set a deploy token to only be available in the\n`production` environment.\n\n\n**Protected variables:** Similar to the environment scope, you can set a\nvariable to be available only when the pipeline runs on a protected branch,\nlike your default branch.\n\n\n**Variable type:** A few applications require configuration to be passed to\nit in the form of a file. If a user has an application that requires this\nconfiguration, just set the type of variable as a \"File\". Configuring the\nCI/CD variable this way means that when the runner makes the variable\navailable in the environment, it actually writes it out to a temporary file,\nand stores the path to the file as the value. Next, a user can pass the path\nto the file to any applications that need it.\n\n\nAlong with the listed ways of defining and using variables, GitLab\nintroduced a feature that generates pre-filled variables when there's a need\nto run a pipeline manually. 
Prefilled variables reduce the chances of\nrunning into an error and makes running the pipeline easier.\n\n\n**Masked variables:** [Masked\nvariables](https://docs.gitlab.com/ee/ci/variables/#mask-a-cicd-variable)\nare CI variables that have been **hidden in job logs** to prevent the\nvariable’s value from being displayed. \n\n\n**Masked and hidden variables:** Introduced in [GitLab\n17.4](https://about.gitlab.com/releases/2024/09/19/gitlab-17-4-released/#hide-cicd-variable-values-in-the-ui),\n[Masked and\nhidden](https://docs.gitlab.com/ee/ci/variables/#hide-a-cicd-variable)\nvariables provide the same masking feature from job logs and **keep the\nvalue hidden** **in the Settings UI**. We do not recommend using either of\nthese variables for sensitive data (e.g. secrets) as they can be\ninadvertently exposed. \n\n\n## Secrets\n\n\nA secret is a sensitive credential that should be kept confidential.\nExamples of a secret include:\n\n\n* Passwords  \n\n* SSH keys  \n\n* Access tokens  \n\n* Any other types of credentials where exposure would be harmful to an\norganization\n\n\nGitLab currently enables its users to [use external secrets in\nCI](https://docs.gitlab.com/ee/ci/secrets/), by leveraging HashiCorp Vault,\nGoogle Cloud Secret Manager, and Azure Key Vault to securely manage keys,\ntokens, and other secrets at the project level. This allows users to\nseparate these secrets from other CI/CD variables for security reasons.\n\n\n### GitLab Secrets Manager\n\n\nBesides providing support for external secrets in CI, GitLab is also working\non introducing a [native solution to secrets\nmanagement](https://gitlab.com/groups/gitlab-org/-/epics/10108) to securely\nand conveniently store secrets within GitLab. This solution will also help\ncustomers use the stored secrets in GitLab specific components and\nenvironments, and easily manage access at namespace groups and projects\nlevel. 
\n\n\n## Read more\n\n* [GitLab native secrets manager to give software supply chain security a\nboost](https://about.gitlab.com/blog/gitlab-native-secrets-manager-to-give-software-supply-chain-security-a-boost/)\n\n\n***Disclaimer:** This blog contains information related to upcoming\nproducts, features, and functionality. It is important to note that the\ninformation in this blog post is for informational purposes only. Please do\nnot rely on this information for purchasing or planning purposes. As with\nall projects, the items mentioned in this blog and linked pages are subject\nto change or delay. The development, release, and timing of any products,\nfeatures, or functionality remain at the sole discretion of GitLab.*\n",[9,917,853,721,109,766],"2025-01-13",{"slug":982,"featured":6,"template":701},"demystifying-ci-cd-variables","content:en-us:blog:demystifying-ci-cd-variables.yml","Demystifying Ci Cd Variables","en-us/blog/demystifying-ci-cd-variables.yml","en-us/blog/demystifying-ci-cd-variables",{"_path":988,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":989,"content":995,"config":1002,"_id":1004,"_type":14,"title":1005,"_source":16,"_file":1006,"_stem":1007,"_extension":19},"/en-us/blog/deploy-aws",{"title":990,"description":991,"ogTitle":990,"ogDescription":991,"noIndex":6,"ogImage":992,"ogUrl":993,"ogSiteName":686,"ogType":687,"canonicalUrls":993,"schema":994},"How to deploy to AWS with GitLab","We believe deploying to the cloud should be easy and boring. 
The deployment process is the same regardless of what tech stack you're using so why not automate it?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672124/Blog/Hero%20Images/aws_rocket.jpg","https://about.gitlab.com/blog/deploy-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy to AWS with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2020-12-15\",\n      }",{"title":990,"description":991,"authors":996,"heroImage":992,"date":998,"body":999,"category":741,"tags":1000},[997],"Orit Golowinski","2020-12-15","\nCloud computing services are replacing traditional hardware technologies at an extremely fast pace. The majority of businesses worldwide are already moving their applications to the cloud — both public and private cloud — or plan to in the near future. Over a short period of time, this technology took over the market as businesses preferred remote access to data as well as the cloud's scalability, economy, and reach.\n\n## AWS Deployment: deploying applications to the cloud\n\nCOVID-19 and the resulting trend toward remote work forced organizations to adopt cloud technologies even if they hadn’t planned to originally. Software deployment to the cloud has also increased. Cloud is no longer just virtual machines, organizations are driving the use of [Containers as a Service (CaaS)](https://searchitoperations.techtarget.com/definition/Containers-as-a-Service-CaaS) due to their growing interest in leveraging containers to ease development and testing, speed up deployment, scale operations, and increase the efficiency of workloads running in the cloud.\n\nSince deployment to the cloud has become a standard practice, at GitLab we want to make this repeatable and [boring](https://handbook.gitlab.com/handbook/values/#boring-solutions). 
In this blog post, we explain how we've made it easier to deploy to Amazon Web Services (AWS) as part of your deployment process. We invite users to replicate this example to deploy to other cloud providers in a similar way.\n\nSince we want cloud deployment to be as flexible as possible (similar to a microservices architecture), we constructed atomic Docker images that function as building blocks. Users can use these images as part of their custom `gitlab-ci.yml` file or use our predefined `.gitlab-ci.yml` templates. We also added the ability to use [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) with the new AWS deployment targets.\n\n## AWS Deployment: how to use GitLab's official AWS Docker Images\n\n### AWS CLI Docker image\nIn [GitLab 12.6](/releases/2019/12/22/gitlab-12-6-released/), we provided an official GitLab [AWS cloud-deploy](https://gitlab.com/gitlab-org/cloud-deploy/-/blob/master/aws/cloud_deploy/Dockerfile) Docker image that downloads and installs the [AWS CLI](https://aws.amazon.com/cli/). This allows users to run `aws` commands directly from their pipelines. For more information, see [Run AWS commands from GitLab CI/CD](https://docs.gitlab.com/ee/ci/cloud_deployment/#run-aws-commands-from-gitlab-cicd).\n\n### CloudFormation stack creation Docker image\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/), we provided a Docker image that runs a script that [creates a stack with CloudFormation](https://gitlab.com/gitlab-org/cloud-deploy/-/blob/master/aws/src/bin/gl-cloudformation). The `gl-cloudprovision create-stack` uses [aws cloudformation create-stack](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) behind the scenes. A JSON file based on the CloudFormation template must be passed to that command. For an example of this type of JSON file, see [`cf_create_stack.json`](https://gitlab.com/ebaque/jekyll-demo/-/blob/deploy-to-ec2/aws/cf_create_stack.json). 
With this type of JSON file, the command creates the infrastructure on AWS, including an EC2 instance directly from the `.gitlab-ci.yml` file. The script exists once we get confirmation that the stack setup is complete or has failed (through periodic polling).\n\n### Push to S3 and Deploy to EC2 Docker image\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/) we also provided a Docker image with [Push to S3 and Deploy to EC2 scripts](https://gitlab.com/gitlab-org/cloud-deploy/-/blob/master/aws/src/bin/gl-ec2). The `gl-ec2 push-to-s3` script pushes source code to an S3 bucket. For an example of the JSON file to pass to the `aws deploy push` command, see [`s3_push.json`](https://gitlab.com/ebaque/jekyll-demo/-/blob/deploy-to-ec2/aws/s3_push.json). This code can be whatever artifact is built from a preceding build job. The `gl-ec2 deploy-to-ec2` script uses `aws deploy create-deployment` behind the scenes to create a deployment to an EC2 instance directly from the `.gitlab-ci.yml` file. For an example of the JSON template to pass, see [`create_deployment.json`](https://gitlab.com/ebaque/jekyll-demo/-/blob/deploy-to-ec2/aws/create_deployment.json). The script ends once we get confirmation that the deployment has succeeded or failed (via polling).\n\n## AWS Deployment: using GitLab CI templates to deploy to AWS\n\n### How to deploy to Elastic Container Service (ECS) with GitLab\nIn [GitLab 12.9](/releases/2020/03/22/gitlab-12-9-released/), we created a full `.gitlab-ci.yml` template called [`Deploy-ECS.giltab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Jobs/Deploy/ECS.gitlab-ci.yml) that deploys to Amazon ECS and extends support for Fargate. Users can include the template in their configuration, specify a few variables, and their application will be deployed and ready to go in no time. This template can be customized for your specific needs. 
For example: Replacing the selected container registry, changing the path of the file location, etc.\n\n### How to deploy to Elastic Cloud Compute (EC2) with GitLab\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/), we created a full `.gitlab-ci.yml` template called [`CF-Provision-and-Deploy-EC2.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Jobs/Deploy/EC2.gitlab-ci.yml) that provisions the infrastructure by leveraging [AWS CloudFormation](https://aws.amazon.com/cloudformation/). It then pushes your previously-built artifact to an [AWS S3 bucket](https://aws.amazon.com/s3/) and deploys the pushed content to [AWS EC2](https://aws.amazon.com/ec2/).\n\n## AWS Deployment: security  considerations\n\n### Predefined AWS CI/CD variables\n\nIn order to deploy to AWS, you must use AWS security keys to connect to to your AWS instance. Users can define this security keys as [CI/CD environment](/topics/ci-cd/) variables that can be used by the deployment pipeline.\n\nIn [GitLab 12.9](/releases/2020/03/22/gitlab-12-9-released/), we added support for predefined AWS variables. This support function helps users know which variables are required for deploying to AWS and also prevents typos and spelling mistakes.\n\n| Env. variable name | Value|\n| --- | --- |\n| `AWS_ACCESS_KEY_ID` | Your Access key ID |\n| `AWS_SECRET_ACCESS_KEY` | Your Secret access key |\n| `AWS_DEFAULT_REGION` | Your region code |\n\n### \"Just-in-time\" guidance for AWS deployments\n\n[GitLab 13.1](/releases/2020/06/22/gitlab-13-1-released/) provides just-in-time guidance for users who wish to deploy to AWS. Setting up AWS deployments isn't always as easy as we'd like it to be, so we've added in-product links to our AWS templates and documentation when you start adding AWS CI/CD variables to make it easier for you to use our AWS features. 
This will help you get up and running faster.\n\n![In-product guidance for AWS](https://about.gitlab.com/images/blogimages/aws_guide.png)\n\nAWS guide from CI/CD variables\n\n### Added security for the GitLab's official AWS Docker images\n\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/), we changed the image identifier from the release version number to the Docker image digest. Docker supports immutable image identifiers and we adopted this best practice to update our cloud-deploy images. When a new image is tagged, we also programmatically retrieve the image digest upon its build and create a release note to effectively communicate this digest to users. This guarantees that every instance of the service runs exactly the same code. You can roll back to an earlier version of the image, even if that version wasn't tagged (or is no longer tagged). This can even prevent race conditions if a new image is pushed while a deploy is in progress.\n\n![Docker Image Digest](https://about.gitlab.com/images/blogimages/digest1.png)\n\nDocker image digest or release tag\n\n## AWS Deployment: auto DevOps support\n\nGitLab already supports Kubernetes users deploying to AWS EKS cluster. Click the link to read instructions about [how to deploy an application to a GitLab-managed Amazon EKS cluster with Auto DevOps](/blog/deploying-application-eks/#:~:text=The%20Auto%20DevOps%20function%20at,build%2C%20and%20deploy%20your%20application).\n\nWe also expanded Auto DevOps to support non-Kubernetes users. Users can specify their deployment target by adding the `AUTO_DEVOPS_PLATFORM_TARGET` variable under the CI/CD variables settings. 
Specifying the deployment target platform builds a full CI/CD pipeline that deploys to AWS targets.\n\nWe currently support:\n\n- `AUTO_DEVOPS_PLATFORM_TARGET: ECS` (added in GitLab 13.0)\n- `AUTO_DEVOPS_PLATFORM_TARGET: FARGATE` (added in GitLab 13.2)\n- `AUTO_DEVOPS_PLATFORM_TARGET: EC2` (added in GitLab 13.6)\n\nFor more information about Auto DevOps for AWS targets, see [requirements for Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/requirements.html) documentation.\n\nHere's a quick recording for how to use Auto Deploy to Amazon ECS:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/HzRhLLFlAos\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nSpeed run on how to use auto deploy to EC2 (animation):\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/rVr-vZfNL6U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## AWS Deployment: Future plans to extend deployment support via GitLab\n\nCheck out some of the open issues below to see our plans are for the future of deploying to AWS using GitLab.\n\n- [Show AWS deployment success code in logs](https://gitlab.com/gitlab-org/gitlab/-/issues/215333): This will bring the success/failure codes from AWS into your GitLab pipeline logs, allowing you to see the deployment success code without needing to go into the AWS console to retrieve the logs.\n- [Show AWS deployment success code in pipeline view](https://gitlab.com/gitlab-org/gitlab/-/issues/232983): This will bring the success/failure codes from AWS into your GitLab pipeline, allowing you to see if the deployment job was successful in one view.\n- [Auto Deploy to AWS S3](https://gitlab.com/gitlab-org/gitlab/-/issues/219087): This will expand the supported deployment targets covered in this blog to include [S3 buckets](https://aws.amazon.com/s3/) as well.\n- [AWS integration per-environment 
role management](https://gitlab.com/gitlab-org/gitlab/-/issues/27107): This returns a set of temporary security credentials you can use to access AWS resources that you normally might not be able to access. This is accomplished by using the [AWS IAM](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) roles.\n\n## More material on deploying to EKS and Lambda\n\n- [Demo of how to deploy to EKS](https://docs.google.com/presentation/d/1iXnB6lvTx2_-_0ASElLUDZwyFPWILCRx54XjJkMFuw0/edit#slide=id.g6bb36a7017_2_42).\n- [Whitepaper on how to deploy on AWS from GitLab](/resources/whitepaper-deploy-aws-gitlab/).\n\nWe invite you to contribute to our other cloud provider solutions:\n\n- [Streamline GCP deployments](https://gitlab.com/groups/gitlab-org/-/epics/2706).\n- [Streamline Azure deployments](https://gitlab.com/groups/gitlab-org/-/epics/4846).\n\nAt GitLab, [everyone can contribute](/company/strategy/#contribute-with-gitlab). If you want to deploy to a target that isn't mentioned in this post, please let us know by adding an issue and linking it to our [Natively support hypercloud deployments](https://gitlab.com/groups/gitlab-org/-/epics/1804) epic.\n\nCover image by [SpaceX](https://unsplash.com/photos/uj3hvdfQujI) on [Unsplash](https://www.unsplash.com)\n",[1001,765,721,9],"cloud native",{"slug":1003,"featured":6,"template":701},"deploy-aws","content:en-us:blog:deploy-aws.yml","Deploy Aws","en-us/blog/deploy-aws.yml","en-us/blog/deploy-aws",{"_path":1009,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1010,"content":1016,"config":1023,"_id":1025,"_type":14,"title":1026,"_source":16,"_file":1027,"_stem":1028,"_extension":19},"/en-us/blog/efficient-devsecops-workflows-with-rules-for-conditional-pipelines",{"title":1011,"description":1012,"ogTitle":1011,"ogDescription":1012,"noIndex":6,"ogImage":1013,"ogUrl":1014,"ogSiteName":686,"ogType":687,"canonicalUrls":1014,"schema":1015},"DevSecOps workflows with conditional CI/CD pipeline 
rules","CI/CD pipelines can be simple or complex, what makes them efficient are CI rules that define when and how they run.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669673/Blog/Hero%20Images/engineering.png","https://about.gitlab.com/blog/efficient-devsecops-workflows-with-rules-for-conditional-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to create efficient DevSecOps workflows with rules for conditional CI/CD pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2023-06-27\",\n      }",{"title":1017,"description":1012,"authors":1018,"heroImage":1013,"date":1020,"body":1021,"category":741,"tags":1022},"How to create efficient DevSecOps workflows with rules for conditional CI/CD pipelines",[1019],"Abubakar Siddiq Ango","2023-06-27","CI/CD pipelines can be simple or complex – what makes them efficient are\nrules that define when and how they run. By using rules, you create smarter\nCI/CD pipelines, which increase teams' productivity and allow organizations\nto iterate faster. In this tutorial, you will learn about the different\ntypes of CI/CD pipelines and rules and their use cases.\n\n\n## What is a pipeline?\n\nA pipeline is a top-level component of [continuous\nintegration](https://docs.gitlab.com/ee/ci/introduction/index.html#continuous-integration)\nand [continuous\ndelivery](https://docs.gitlab.com/ee/ci/introduction/index.html#continuous-delivery)/[continuous\ndeployment](https://docs.gitlab.com/ee/ci/introduction/index.html#continuous-deployment),\nand it comprises [jobs](https://docs.gitlab.com/ee/ci/jobs/index.html),\nwhich are lists of tasks to be executed. 
Jobs are organized in\n[stages](https://docs.gitlab.com/ee/ci/yaml/index.html#stages), which define\nwhen the jobs run.\n\n\nA pipeline can be a [basic\none](https://docs.gitlab.com/ee/ci/pipelines/pipeline_architectures.html#basic-pipelines)\nin which jobs run concurrently in each stage. Pipelines can also be complex,\nlike [parent-child\npipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html#parent-child-pipelines),\n[merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html),\n[multi-project\npipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html#multi-project-pipelines),\nor the more advanced [Directed Acyclic Graph\npipelines](https://docs.gitlab.com/ee/ci/directed_acyclic_graph/index.html)\n(DAG).\n\n\n![Complex pipeline showing\ndependencies](https://about.gitlab.com/images/blogimages/2023-06-15-efficient-devsecops-workflows-with-rules-for-conditional-pipelines/complex-pipelines.png)\n\n\nA [gitlab-runner\npipeline](https://gitlab.com/gitlab-org/gitlab-runner/-/pipelines/798871212/)\nshowing job dependencies.\n\n{: .note.text-center}\n\n\n![Directed Acyclic\nGraph](https://about.gitlab.com/images/blogimages/2023-06-15-efficient-devsecops-workflows-with-rules-for-conditional-pipelines/dag-pipelines.png)\n\n\nDirected Acyclic Graph pipeline\n\n{: .note.text-center}\n\n\nUse cases determine how complicated a pipeline can get. A use case might\nrequire testing an application and packaging it into a container; the\npipeline can even further deploy the container to an orchestrator like\nKubernetes or a container registry. Another use case might involve building\napplications that target different platforms with varying dependencies,\nwhich is where DAG pipelines shine.\n\n\n## What are CI/CD rules?\n\nCI/CD rules are the key to managing the flow of jobs in a pipeline. 
One of\nthe powerful features of GitLab CI/CD is the ability to control when a CI/CD\njob runs, which can depend on context, changes made,\n[workflow](https://docs.gitlab.com/ee/ci/yaml/workflow.html) rules, values\nof CI/CD variables, or custom conditions. Aside from using `rules`, you can\nalso control the flow of CI/CD pipelines using:\n\n\n* [`needs`](https://docs.gitlab.com/ee/ci/yaml/index.html#needs):\nestablishes relationships between jobs and used in DAG pipelines\n\n* [`only`](https://docs.gitlab.com/ee/ci/yaml/index.html#only--except):\ndefines when a job should run\n\n* [`except`](https://docs.gitlab.com/ee/ci/yaml/index.html#only--except):\ndefines when a job should not run\n\n* [`workflow`](https://docs.gitlab.com/ee/ci/yaml/workflow.html): controls\nwhen pipelines are created\n\n\n`only` and `except` should not be used with `rules` as this can lead to\nunexpected behavior. It is recommended to use `rules`, learn more in the\nfollowing sections.\n\n\n## What is the `rules` feature?\n\n`rules` determine when and if a job runs in a pipeline. If you have multiple\nrules defined, they are all evaluated in order until a matching rule is\nfound and the job is executed according to the specified configuration.\n\n\n[Rules](https://docs.gitlab.com/ee/ci/yaml/#rules) can be defined using the\nkeywords: `if`, `changes`, `exists`, `allow_failure`, `variables`, `when`\nand `needs`.\n\n\n### `rules:if`\n\nThe `if` keyword evaluates if a job should be added to a pipeline. The\nevaluation is done based on the values of [CI/CD\nvariables](https://docs.gitlab.com/ee/ci/variables/index.html) defined in\nthe scope of the job or pipeline and [predefined CI/CD\nvariables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html).\n\n\n```yaml\n\njob:\n  script:\n    - echo $(date)\n  rules:\n    - if: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME == $CI_DEFAULT_BRANCH\n```\n\n\nIn the CI/CD script above, the job prints the current date and time with the\n`echo` command. 
The job is only executed if the source branch of a merge\nrequest (`CI_MERGE_REQUEST_SOURCE_BRANCH_NAME`) is the same as the project's\ndefault branch (`CI_DEFAULT_BRANCH`) in a [merge request\npipeline](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html).\nYou can use the `==` and `!=` operators for comparison, while `=~` and `!~`\nallow you to compare a variable to a regular expression. You can combine\nmultiple expressions using the `&&` (AND), `||` (OR) operators, and\nparentheses for grouping expressions.\n\n\n### `rules:changes`\n\nWith the `changes` keyword, you can watch for changes to certain files or\nfolders for a job to execute. GitLab uses the output of [Git\ndiffstat](https://git-scm.com/docs/git-diff#Documentation/git-diff.txt) to\ndetermine which files have changed.\n\n\n```yaml\n\njob:\n  script:\n    - terraform plan\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n      changes:\n        - terraform/**/*.tf\n```\n\n\nIn this example, the `terraform plan` is only executed when files with the\n`.tf` extension are changed in the `terraform` folder and its\nsubdirectories. An additional rule ensures the job is executed for [merge\nrequest\npipelines](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html).\n\n\nThe `changes` rule can look for changes in specific files with `paths`:\n\n\n```yaml\n\njob:\n  script:\n    - terraform plan\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n      changes:\n        paths:\n          - terraform/main.tf\n```\n\n\nChanges to files in a source reference (branch, tag, commit) can also be\ncompared against other references in the Git repository. The CI/CD job will\nonly execute when the source reference differs from the [specified reference\nvalue defined in\n`rules:changes:compare_to`](https://docs.gitlab.com/ee/ci/yaml/#ruleschangescompare_to).\nThis value can be a Git commit SHA, tag, or branch name. 
The following\nexample compares the source reference to the current `production` branch\n(`refs/heads/production`).\n\n\n```yaml\n\njob:\n  script:\n    - terraform plan\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n      changes:\n        paths:\n          - terraform/main.tf\n        compare_to: 'refs/heads/production'\n```\n\n\n### `rules:exists`\n\nLike `changes`, you can execute CI/CD jobs only when specific files exist\n[using `rules:exists`\nrules](https://docs.gitlab.com/ee/ci/yaml/#rulesexists). For example, you\ncan run a job that checks whether a `Gemfile.lock` file exists. The\nfollowing example audits a Ruby project for vulnerable versions of gems or\ninsecure gem sources using the [bundler-audit\nproject](https://github.com/rubysec/bundler-audit).\n\n\n```yaml\n\njob:\n  script:\n    - bundle-audit check --format json --output bundle-audit.json\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n      exists:\n        - Gemfile.lock\n```\n\n\n### `rules:allow_failure`\n\nThere are scenarios where the failure of a job should not affect the\nfollowing jobs and stages of the pipeline. This can be useful in use cases\nwhere non-blocking tasks are required as part of a project but don't impact\nthe project in any way. The [`rules:allow_failure`\nrule](https://docs.gitlab.com/ee/ci/yaml/#rulesallow_failure) can be set to\n`true` or `false`. 
It defaults to `false` implicitly when the rule is not\nspecified.\n\n\n```yaml\n\njob:\n  script:\n    - bundle-audit check --format json --output bundle-audit.json\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\" && $CI_MERGE_REQUEST_TARGET_BRANCH_PROTECTED == \"false\"\n      exists:\n        - Gemfile.lock\n      allow_failure: true\n```\n\n\nIn this example, the job can fail only if a merge request event triggers the\npipeline and the target branch is not protected.\n\n\n### `rules:needs`\n\nDisabled by default,\n[`rules:needs`](https://docs.gitlab.com/ee/ci/yaml/#rulesneeds) was\nintroduced in [GitLab\n16](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/) and\ncan be enabled with the `introduce_rules_with_needs` [feature\nflag](https://docs.gitlab.com/ee/user/feature_flags.html).\n[`needs`](https://docs.gitlab.com/ee/ci/yaml/index.html#needs) is used to\nexecute jobs out of order without waiting for other jobs in a stage to\ncomplete. 
When used with `rules`, it replaces the job's `needs`\nspecification when the set conditions are met.\n\n\n```yaml\n\nstages:\n  - build\n  - qa\n  - deploy\n\nbuild-dev:\n  stage: build\n  rules:\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n  script: echo \"Building dev version...\"\n\nbuild-prod:\n  stage: build\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n  script: echo \"Building production version...\"\n\nqa-checks:\n  stage: qa\n  script:\n    - echo \"Running QA checks before publishing to Production....\"\n\ndeploy:\n  stage: deploy\n  needs: ['build-dev']\n  rules:\n    - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH\n      needs: ['build-prod', 'qa-checks']\n    - when: on_success # Run the job in other cases\n  script: echo \"Deploying application.\"\n\n```\n\n\nIn the example above, the deploy job has the `build-dev` job as a dependency\nbefore it runs; however, when the commit branch is the project's default\nbranch, its dependency changes to `build-prod` and `qa-checks`. This can\nallow for extra checks to be implemented based on context.\n\n\n### `rules:variables`\n\nIn some situations, you only need certain variables in specific conditions,\nor their values change based on content; you can use the\n[`rules:variables`](https://docs.gitlab.com/ee/ci/yaml/#rulesvariables) rule\nto define variables when specific conditions are met. This also allows to\ncreate more dynamic CI/CD execution workflows.\n\n\n```\n\njob:\n  variables:\n    DEPLOY_VERSION: \"dev\"\n  rules:\n    - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH\n      variables:\n        DEPLOY_VERSION: \"stable\"\n  script:\n    - echo \"Deploying $DEPLOY_VERSION version\"\n```\n\n\n### `workflow:rules`\n\nSo far, we have looked at controlling when jobs run in a pipeline using the\n`rules` keyword. Sometimes, you want to control how the entire pipeline\nbehaves: That's where [`workflow:rules` provide a powerful\noption](https://docs.gitlab.com/ee/ci/yaml/#workflowrules). 
`workflow:rules`\nare evaluated before jobs and take precedence over the job rules. For\nexample, if a job has rules that allow it to run against a specific branch,\nbut the workflow rules set jobs running against the branch to `when: never`,\nthe jobs will not run.\n\n\nAll the features of `rules` mentioned in the previous sections work for\n`workflow:rules`.\n\n\n```yaml\n\nworkflow:\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"schedule\"\n      when: never\n    - if: $CI_PIPELINE_SOURCE == \"push\"\n      when: never\n    - when: always\n```\n\n\nIn the example above, the CI/CD pipeline runs except when a schedule or push\nevent is triggered.\n\n\n## Use cases for CI/CD rules\n\nIn the previous section, we looked at different ways of using the `rules`\nfeature of GitLab CI/CD. In this section, we will explore practical use\ncases.\n\n\n### Developer experience\n\nOne of the benefits of a DevSecOps platform is to allow developers to focus\non what they do best: writing their code and doing as little operations as\npossible. A company's DevOps or Platform team can create CI/CD templates for\ndifferent stages of their development lifecycle and use rules to add CI/CD\njobs to handle specific tasks based on their technology stack. A developer\nonly needs to include a default CI/CD script and pipelines are automatically\ncreated based on files detected, refs used, or defined variables, leading to\nincreased productivity.\n\n\n### Security and quality assurance\n\nA major function of CI/CD pipelines is to catch bugs or vulnerabilities\nbefore they are deployed into production infrastructure. Using CI/CD rules,\nsecurity and quality assurance teams can dynamically run extra checks on\nchanges introduced when certain factors are introduced. For example, malware\nscans can be added when new file extensions not in an approved list are\ndetected, or more advanced performance tests are automatically added when a\ncertain level of change has been introduced to the codebase. 
With GitLab's\nbuilt-in security, including security in your pipelines can be done with\njust a few lines of code.\n\n\n```yaml\n\ninclude:\n  # Static\n  - template: Jobs/Container-Scanning.gitlab-ci.yml\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml\n  - template: Jobs/SAST.gitlab-ci.yml\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\n  - template: Jobs/SAST-IaC.gitlab-ci.yml\n  - template: Jobs/Code-Quality.gitlab-ci.yml\n  - template: Security/Coverage-Fuzzing.gitlab-ci.yml\n  # Dynamic\n  - template: Security/DAST.latest.gitlab-ci.yml\n  - template: Security/BAS.latest.gitlab-ci.yml\n  - template: Security/DAST-API.latest.gitlab-ci.yml\n  - template: API-Fuzzing.latest.gitlab-ci.yml\n```\n\n\n### Automation\n\nThe power of CI/CD rules shines through in the (nearly) limitless\npossibilities of automating your CI/CD pipelines. GitLab\n[AutoDevOps](https://docs.gitlab.com/ee/topics/autodevops/) is an example.\nIt uses an opinionated best-practice collection of [GitLab CI/CD\ntemplates](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates)\nand rules to detect the technology stack used. AutoDevOps creates relevant\njobs that take your application all the way to production from a push. You\ncan review the [AutoDevOps\ntemplate](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml)\nto learn how it leverages CI/CD rules for greater efficiency.\n\n\n### Using CI/CD components\n\nGrowth comes with several iterations of work and creating best practices.\nWhile building CI/CD pipelines, your DevOps team would have made several\nCI/CD scripts that they repurpose across pipelines using the\n[`include`](https://docs.gitlab.com/ee/ci/yaml/#include) keyword. 
In [GitLab\n16](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/),\nGitLab [introduced CI/CD\nComponents](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/#cicd-components),\nan experimental feature that allows your team to create reusable CI/CD\ncomponents and publish them as a catalog that can be used to build smarter\nCI/CD pipelines rapidly. You can learn more [about using CI/CD\ncomponents](https://docs.gitlab.com/ee/ci/components/) and the [component\ncatalog\ndirection](https://about.gitlab.com/direction/verify/component_catalog/).\n\n\nGitLab CI/CD enables you to run smarter pipelines, and it does so together\nwith [GitLab Duo, AI-powered workflows](/gitlab-duo/) to help you build more\nsecure software, faster.\n",[766,721,9,789,496],{"slug":1024,"featured":6,"template":701},"efficient-devsecops-workflows-with-rules-for-conditional-pipelines","content:en-us:blog:efficient-devsecops-workflows-with-rules-for-conditional-pipelines.yml","Efficient Devsecops Workflows With Rules For Conditional Pipelines","en-us/blog/efficient-devsecops-workflows-with-rules-for-conditional-pipelines.yml","en-us/blog/efficient-devsecops-workflows-with-rules-for-conditional-pipelines",{"_path":1030,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1031,"content":1037,"config":1043,"_id":1045,"_type":14,"title":1046,"_source":16,"_file":1047,"_stem":1048,"_extension":19},"/en-us/blog/efficient-pipelines",{"title":1032,"description":1033,"ogTitle":1032,"ogDescription":1033,"noIndex":6,"ogImage":1034,"ogUrl":1035,"ogSiteName":686,"ogType":687,"canonicalUrls":1035,"schema":1036},"Extract greater efficiency from your CI pipelines","Learn some techniques to find the balance between pipeline performance and resource utilization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667534/Blog/Hero%20Images/ci-pipeline.jpg","https://about.gitlab.com/blog/efficient-pipelines","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Extract greater efficiency from your CI pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vlad Budica\"}],\n        \"datePublished\": \"2022-03-09\"\n      }",{"title":1032,"description":1033,"authors":1038,"heroImage":1034,"date":1040,"body":1041,"category":741,"tags":1042},[1039],"Vlad Budica","2022-03-09","\nWhen discussing efficiency, typically we need to balance two things: time and money. It's quite easy to optimize for just one of these parameters. However, that can be an oversimplification. Within some constraints, more resources (i.e., hardware and Runners) equal better performance. Yet, the exact opposite is true for other constraints. In this article, I will walk you through the process of finding the sweet spot in optimizing your GitLab CI pipeline. The principles that I'll cover work well for existing pipelines and also for new ones. Please note that this is subjective and the sweet spot might be very different for different users in different scenarios.\n\nAs we dig into the technical aspects, note that we are looking for an overall optimization of a pipeline, as opposed to just looking at a particular job. The reasoning behind it is that local optimizations might make the overall pipeline less efficient (we might generate bottlenecks).\n\nThe optimization recommendations below fall into two categories:\n- Execute fewer jobs and pipelines\n- Shorten the execution time of jobs and pipelines\n\nThe first step before modifying an aspect of a system is to understand it. Observe it in full. You need to know the overall pipeline architecture and also the current metrics for it. You need to know the total execution time, jobs that take a large amount of time to finish (any bottlenecks), and the total job workload (potential queue time) and Runner capacity – these last two go hand in hand. 
Finally, we can use [Directed Acyclic Graphs](https://docs.gitlab.com/ee/ci/directed_acyclic_graph/), or DAGs, to visualize the pipeline and see the critical path (the minimum and maximum pipeline duration). We want to do this because we want to minimize as much as possible the detrimental impact doing changes can have on pipeline performance.\n\n## Execute fewer jobs and pipelines\n\nLet's look at ways of reducing the number of jobs and pipelines that get executed.\n\n### Apply rules\n\nThe first thing would be to decide what needs to be executed and when. For example, with a website, if the only change that was performed is to the text on the page, then the resulting pipeline doesn't need to contain all the tests and checks that are performed when changing the web app.\n\nThis requires the use of the [rules keyword](https://docs.gitlab.com/ee/ci/yaml/#rules). Rules are evaluated when a pipeline is created (at each trigger), and evaluated in order until the first match. When a match is found, the job is either included or excluded from the pipeline, depending on the configuration.\n\nThrough the rules keyword you can decide very precisely when a job should run or not. More information about use cases and configuration parameters can be found in the [doc page for rules](https://docs.gitlab.com/ee/ci/yaml/#rules).\n\n### Make jobs interruptible\n\nNow that jobs are only running when needed, you can focus on what happens when a new pipeline is triggered while a job is still running. This can lead to inefficiencies because we already know the job isn't running on the latest change performed on the target branch and that the results will get scrapped.\n\nThis is where the [interruptible keyword](https://docs.gitlab.com/ee/ci/yaml/#interruptible) comes in. It enables us to specify that a job can be interrupted when a newer one is triggered on the same branch. 
This should be coupled with the [automatic cancellation of redundant pipelines feature](https://docs.gitlab.com/ee/ci/pipelines/settings.html#auto-cancel-redundant-pipelines) so, in the end, jobs will be automatically canceled when newer pipelines are triggered.\n\nOne word of caution, use this mechanism only with jobs that are safe to stop such as a build or a test job. Don't use this with your deployment jobs as you're eventually going to end up with partial deployments.\n\nOne last point around executing fewer jobs and pipelines is to try to reschedule non-essential pipelines to as least frequent as possible. It's a balance that needs to be found between running the pipelines too often and not running them enough. Just go with the minimum acceptable by your company policy.\n\n## Shorten the execution time of jobs and pipelines\n\nThe next thing would be to find ways of making our jobs and pipelines execute in less time.\n\n### Execute jobs in parallel\n\nYou can [create DAGs in your pipelines](https://docs.gitlab.com/ee/ci/directed_acyclic_graph/) to create relationships between jobs and ensure that jobs are executed as soon as all the requirements are met if there are any and that they aren't waiting unnecessarily for other jobs to finish. By using the [needs keyword](https://docs.gitlab.com/ee/ci/yaml/#needs) together with the [parallel keyword](https://docs.gitlab.com/ee/ci/yaml/#parallel), you can implement DAGs.\n\nAnother useful mechanism to drive parallelism is [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html), which enable you to trigger concurrently running pipelines.\n\nThese offer great flexibility and by using them you can execute your workloads in parallel as efficiently as possible. 
This can be a double-edged sword though as DAGs and [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) will increase the complexity of your pipelines, making them harder to analyze and understand. Within this very complex environment, you can run into unwanted side effects such as increased cost or even reduced efficiency.\n\nThe more jobs and pipelines you run in parallel, the more load will be put on your Runner infrastructure. If you do have an autoscaling mechanism and a large enough pool of resources, this will ensure no big queues are created and that things are running smoothly, but also lead to increased infrastructure costs. On the other hand, if you don't have autoscaling or if you have lower limits for the amount of resources available, the costs will be kept in check but your overall execution time will suffer because jobs will wait longer in queues.\n\n### Fail fast\n\nIt's desirable to detect errors and critical failures as soon as possible in your jobs and pipelines, and stop the execution. If you wait until toward the end of the pipeline to fail, the whole pipeline will waste hardware resources and increase your execution and waiting times. This is easier to implement when first designing a pipeline but can be achieved as well through refactoring of your existing ones.\n\nTesting usually takes a lot of time so this means that we're waiting for the execution to finish before canceling the whole pipeline if the tests fail. What we want to do is move the jobs that run quicker earlier in the pipeline thus getting feedback sooner. To configure this behavior, use the [allow_failure keyword](https://docs.gitlab.com/ee/ci/yaml/#allow_failure) and only for jobs that when fail should fail the whole pipeline.\n\n### Caching\n\nYou can also optimize the caching of your dependencies, which will improve the execution time. 
This can be very useful for jobs that fail often but for which the dependencies don't change that often.\n\nTo configure this in your jobs, you should use the [cache:when keyword](https://docs.gitlab.com/ee/ci/yaml/#cachewhen).\n\n### Optimize your container images\n\nUsing big images in your pipelines can slow things down significantly, as they take longer to be pulled. So the solution would be to use smaller images. Simple, right?\n\nWell, it's not always that easy to do, so you should start by analyzing your base image and your network speed as these two will give an indication of how long it will take for the image to be pulled. The network connection we're interested in is the one between your Runner and your container registry.\n\nOnce we have this kind of information, we can decide to host the image in another container registry. If you have GitLab hosted in a public cloud you should use the container image registry provided by that provider. An alternative that works no matter where GitLab is hosted is to use the internal GitLab container registry that's included with your service.\n\nYou will get better results if instead of using a master container image that holds everything that you need to run the whole pipeline, you use multiple smaller ones that are tailored for each job. It's faster if you use custom container images and have all the tools you need pre-installed. This would also be a safer option because you can validate more thoroughly the contents of the image.\n\nMore information about this topic can be found in [Docker's \"Best practices for writing Dockerfiles\"](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/).\n\n## Pipeline optimization is part science, part art\n\nYou should approach your pipeline optimization efforts through a continuous improvement lens. 
This process is part science, part art as there aren't any quick solutions that you can apply and get your ideal result.\n\nI encourage you to test, document, and analyze the results when it comes to pipeline optimization efforts. You try one thing, look for feedback from the metrics of your pipelines, document the results, the changes, and the new architecture (this can happen in GitLab issues and merge requests) so you can extract some learnings, and the cycle starts again.\n\nSmall gains will add up and provide significant improvements at a higher scale. As I mentioned before, look for overall improvements instead of local ones. Now applying these principles to each project (pipeline templates makes it easier to adopt at scale), we can look at how these improvements across projects add up.\n\nRead more: Learn how to [troubleshoot a GitLab pipeline failure](https://docs.gitlab.com/ci/debugging/).\n",[765,721,9],{"slug":1044,"featured":6,"template":701},"efficient-pipelines","content:en-us:blog:efficient-pipelines.yml","Efficient Pipelines","en-us/blog/efficient-pipelines.yml","en-us/blog/efficient-pipelines",{"_path":1050,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1051,"content":1057,"config":1065,"_id":1067,"_type":14,"title":1068,"_source":16,"_file":1069,"_stem":1070,"_extension":19},"/en-us/blog/ensuring-compliance",{"title":1052,"description":1053,"ogTitle":1052,"ogDescription":1053,"noIndex":6,"ogImage":1054,"ogUrl":1055,"ogSiteName":686,"ogType":687,"canonicalUrls":1055,"schema":1056},"How to ensure separation of duties and enforce compliance with GitLab","Use your DevSecOps platform to help maintain compliance without compromising on development speed.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098232/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_479904468%20%281%29_4lmOEVlaXP0YC3hSFmOw6i_1750098232241.jpg","https://about.gitlab.com/blog/ensuring-compliance","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to ensure separation of duties and enforce compliance with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Beatriz Barbosa\"},{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2022-04-04\"\n      }",{"title":1052,"description":1053,"authors":1058,"heroImage":1054,"date":1061,"body":1062,"category":787,"tags":1063,"updatedDate":1064},[1059,1060],"Beatriz Barbosa","Fernando Diaz","2022-04-04","In this article, you'll learn the different ways to ensure **separation of\nduties** and\n\n**continuous compliance** with the GitLab DevSecOps platform. But first,\nlet's level-set on two key concepts:\n\n\n**Compliance** means being in accordance with guidelines and specifications\nthat have been\n\ndefined either by your corporation or a regulatory agency. Compliance helps\nmaintain\n\ncorporate ethics, appropriate user policies, security standards, and much\nmore for\n\nthe safety of consumers.\n\n\nNon-compliance may result in a bundle of legal fees and fines, so it is very\nimportant to maintain compliance. While maintaining compliance, DevSecOps\nteams must also ensure sustained development velocity, providing necessary\nsimplicity, visibility, and control.\n\n\n**Separation of duties** requires multiple actors to complete a task to\nincrease protection from error as well as prevent malicious activity.\nSeparation of duties ensures roles best-suited for the job are the only ones\nthat can perform it. 
As an example, some of the following\n\nactors are observed, each with a specific purpose:\n\n\n- a developer will be responsible for developing new features\n\n- a compliance officer will be responsible for creating and enforcing the\nusage of a pipeline\n\n- an application security engineer will be responsible for approving merge\nrequests with vulnerabilities\n\n\nConsidering the above roles, we can ensure that a developer cannot change a\nrunning pipeline.\n\nThis is a task that can only be performed by a compliance officer, ensuring\nonly compliant code can be pushed without approval.\n\n\nAn application security engineer is responsible for reviewing and approving\ncode with vulnerabilities, ensuring proper mitigation can be performed, and\nthat nothing comes as a surprise in the future. In this scenario, developers\ncan't merge code until compliance\n\nand security requirements are met.\n\n\n## Security policies\n\nGitLab provides **Security Policies**, which enable security teams to\nrequire security scans to run according to a configuration. This gives\nsecurity teams confidence that the configured scans have not been changed or\ndisabled.\n\n\nSecurity policies can be scoped to meet certain **Compliance Frameworks**.\nThis means that your project has certain compliance requirements and needs\nadditional oversight. 
This label can be created in **Secure > Compliance\nCenter > Frameworks** under your top-level group.\n\n\n![Compliance Framework\nLabel](https://about.gitlab.com/images/blogimages/compliance-04-2022/cf-step-2.png)\n\n\n**Note:** Compliance labels can only be assigned to projects within the\ntop-level group in which we create the label.\n\n\nThere are three types of policies, [Scan Execution\nPolicies](https://docs.gitlab.com/ee/user/application_security/policies/scan_execution_policies.html),\n[Merge Request Approval\nPolicies](https://docs.gitlab.com/ee/user/application_security/policies/merge_request_approval_policies.html),\nand [Pipeline Execution\nPolicies](https://docs.gitlab.com/ee/user/application_security/policies/pipeline_execution_policies.html).\n\n\n* **Scan Execution Policies:** Require that security scans run on a\nspecified schedule or with the project pipeline.\n\n* **Merge Request Approval Policies:** Take action based on scan results,\nsuch as requiring approval from the security team before a merge can occur.\n\n* **Pipeline Execution Policies:** Enforce CI/CD jobs for applicable\nprojects.\n\n\nThese policies can be configured via the Policy Editor in a few simple\nsteps.\n\n\n### Scan execution\n\n\n1. Go to **Security & Compliance > Policies**.\n\n\n2. Create a new policy by pressing the **New Policy** button.\n\n\n3. Select **Scan Execution**.\n\n\n4. Create the rule. I'm creating a rule that requires\n[SAST](https://docs.gitlab.com/ee/user/application_security/sast/) to be\nconfigured in order for a pipeline to run.\n\n\n```yaml\n\nname: force_sast\n\ndescription: 'require sast to run'\n\nenabled: true\n\nrules:\n\n- type: pipeline\n  branches:\n  - main\nactions:\n\n- scan: sast\n\n```\n\n\n5. 
Submit the policy by creating a merge request and then merge.\n\n\nAll scan execution policy changes are applied through a background job that\nruns once every 10 minutes.\n\nAllow up to 10 minutes for any policy changes committed to this project to\ntake effect.\n\n\n6. Try and run a pipeline. It will not be run unless SAST is defined in the\nYAML.\n\n\n**Note**: You can also force SAST to run on a timer. For more information,\nsee the scan execution\n\npolicies\n[documentation](https://docs.gitlab.com/ee/user/application_security/policies/scan-execution-policies.html).\n\n\n### Merge Request Approval\n\n\n1. Go to **Secure > Policies**.\n\n\n2. Create a new policy by pressing the **New Policy** button.\n\n\n3. Select **Merge Request Approval Policy**.\n\n\n4. Define policy scope.\n\n\n5. Create the rule.\n\n\n![separation of duties update - image\n1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098241/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098241214.png)\n\n\n6. Add action to take.\n\n\n![separation of duties update - image\n2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098241/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098241215.png)\n\n\n**Note:** The policy is evaluated according to the rules you set. This means\nthat, if the rules are invalid, or can’t be evaluated, approval is required.\nTo prevent this, the default Fallback behavior field can be changed to\n`open`.\n\n\n![separation of duties update - image\n3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098241/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098241217.png)\n\n\n1. Submit the policy by creating a merge request and then merging\n\n\n2. Create a separate merge request with vulnerabilities\n\n\nYou can see how to add vulnerabilities by checking out the Developer\nWorkflow section of the GitLab DevSecOps Workshop.\n\n\n3. 
Verify Merge Request Approval Policy is being used by viewing merge\nrequest.\n\n\n### Pipeline Execution Policy\n\n\nTo set up a pipeline execution policy, you need to first create a project\ncontaining the CI files you would like to run. Make sure that only the\nsecurity team and/or administrator has access to ensure separation of\nduties. I created the \"Compliance and Deploy\" project, which contains the\nYAML I wish to enforce.\n\n\n1. Go to **Secure > Policies**.\n\n\n2. Create a new policy by pressing the **New Policy** button.\n\n\n3. Select **Pipeline Execution Policy**.\n\n\n4. Define policy scope.\n\n\n5. Add action to take.\n\n\n![separation of duties update - image\n4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098241/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098241219.png)\n\n\n6. Add conditions.\n\n\n7. Submit the policy by creating a merge request and then merging.\n\n\n8. Try and run a pipeline. You will see the policy specific jobs and stages\nin your pipeline.\n\n\n## Audit Management and Compliance Dashboard\n\n\nAnother important part of compliance is knowing it is actually happening in\nyour groups/projects. 
GitLab has Audit Events and Compliance Reports to\nassist with audits.\n\n\n**Audit Events** allows GitLab owners and administrators to track important\nevents such as who performed certain actions and the time they occurred.\n\n\n![Audit\nevents](https://about.gitlab.com/images/blogimages/compliance-04-2022/project-audit-events.png)\n\n\nAudit Events records different events per group and per project, which can\nbe seen\n\nin the [audit\nevents](https://docs.gitlab.com/ee/administration/audit_events.html)\ndocumentation.\n\nAudit Events can be accessed by going to **Security & Compliance > Audit\nEvents**.\n\nSome examples include:\n\n\n- user was added to project and their permissions\n\n- permission changes of a user assigned to a project\n\n- project CI/CD variable added, removed, or protected status changed\n\n- user was added to group and their permissions\n\n- group name or path changed\n\n\nAudit Events can also be sent to an HTTP endpoint using Audit Event\nStreaming. Learn how\n\nto implement Audit Event Streaming in this\n[video](https://youtu.be/zHwVF9-i7e4?t=52).\n\n\n**Standards Adherence** gives you the ability to see a group's merge request\nactivity. It provides a high-level view for all projects in the group.\n\n\n![separation of duties update - image\n5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098241/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098241222.png)\n\n\nYou can use the report to:\n\n- get an overview of the latest merge request for each project\n\n- see if merge requests were approved and by whom\n\n- see merge request authors\n\n- see the latest CI/CD pipeline result for each merge request\n\n\nThe Standards Adherence report can be accessed in the top-level group by\ngoing to **Secure > Compliance Center**, and choosing the **Standards\nAdherence** tab.\n\n\n---\n\n\nThanks for reading! 
For more information on separation of duties within\nGitLab, check out [Continuous Software Compliance with\nGitLab](/solutions/compliance/)
In my previous role, I was involved in \"all the things\" relating to our DevOps lifecycle, and we faced issues with everything from [continuous deployment (CD)](/topics/ci-cd/) to database management to implementing microservices.\n\nDo any of these scenarios sound familiar?\n\n- We want to adopt microservices but our application is not ready.\n- We know Kubernetes and containers are awesome but we cannot figure out how to get started.\n- We want to do CD but we are still doing manual deployments.\n\nIf you are facing one of these situations, you are not alone. I have lived through them in past roles and now spend my days talking to and helping folks across the industry who are facing these problems (and worse). These common problems lead to a larger conversation at GitLab: Why does it take 20 minutes or more to create a production app in 2020?\n\nThis question is why we challenged ourselves (okay, it's why Sid challenged us) to create a 5 minute production app. The goal is to get from having a free AWS account to a Rails/Node production app with a persistent [serverless](/topics/serverless/) database, Auto DevOps, Single Sign-On (SSO), Redis, object storage, and email in 5 minutes using only the GitLab UI.\n\nOur vision for the 5 minute production app is to provide everyone with a pathway to efficient deployments by minimizing infrastructure dependencies. This builds on learned lessons from [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) and [Infrastructure as Code with Terraform](https://docs.gitlab.com/ee/user/infrastructure/) (for example, by removing the requirement for Kubernetes).\n\n### Common problems\n\n#### Kubernetes and microservices\n\nKubernetes and containers can be overwhelming. The value they add comes with increased levels of complexity. Similarly, microservices can improve efficiency and high availability but they may not fit for all application architectures. 
Large rewrites might be necessary to take advantage of the benefits they provide.\n\nHeroku and Cloud Native Build Packs are a great way to automate Docker image creation with all dependencies but not all use cases are covered. When these deployments break, debugging can be hard without in-depth knowledge of the components. Defining and maintaining the dependencies in the build process by yourself can help, for example in your own Docker container group using the [GitLab Container Registry](https://docs.gitlab.com/ee/user/packages/container_registry/).\n\n#### Backend requirements\n\nA web application can have a stateful backend where it stores persistent data. This can be a file on disk, a database server or an object storage in the cloud. The stored data can be user settings, inputs into web forms and generated content for example.\n\nDepending on the programming language, the interaction with the backend can get complex. A database client library is required to communicate with a PostgreSQL server. The database schema needs to be initialised, and future changes require incremential schema updates. The schema update migrations can be automated by the application. This requires client libraries providing this functionality. Ruby on Rails uses rake db tasks while it can get more complicated with PHP.\n\nThe database server needs to be running in order for the web application to work. This can happen on the same host, a central database cluster, or a cloud service such as Amazon Aurora. Someone must be responsible for keeping the server running, monitoring it, and managing software updates.\n\nAll backend solutions require maintenance. As a developer, you want to have these steps automated and abstracted. 
Your code communicates with the backend interfaces as a blackbox, expecting them to be healthy and operational when the application starts.\n\n### Path to resolution\n\n#### Deploy and run the application\n\nThe production environment for a basic web application requires the following steps:\n\n- Start/Detect the database server or service\n- Initialize/Migrate the database schema\n- Start the web application\n- Schedule periodic health checks and add performance monitoring\n\nIn addition to the boot steps, these web applications can depend on additional libraries and packages. Common best practice is to define them in the programming language's package dependency manager, for example `requirements.txt` with Python, or `Gemfile` with Ruby. The software deployment process evolved over the years with packaging the application into container images, containing the application and all dependencies. The CI/CD jobs do not need to add any extra steps for software installation. As a developer, you don’t care about the OS or distribution where the application is deployed.\n\n#### Choose your stack\n\nThe decision to choose the \"right\" tools for the job can be hard. It helps to define the required steps and map them onto existing functionality provided by GitLab:\n\n- Provision a new virtual machine\n- Define the state with Infrastructure as Code\n- Build and deploy the application\n- Run the application\n\nWe have decided start with AWS as a deployment scenario:\n\n- Ask for AWS credentials for EC2\n- Run Terraform and provision the VM\n- Create AWS Aurora RDS as PostgreSQL backend\n- Install application package dependencies into a container image\n- Pull the image on the host\n- Run and monitor the application\n\nThis process involves lots of steps, requiring different tools and frameworks. 
After all those years, isn’t there a ready-to-use workflow to abstract this and have everything automatically deployed?\n\n### How we settled on the stack for the 5 min production app\n\n1. AWS: Biggest cloud\n2. Terraform: Most popular infrastructure provisioning\n3. Auto DevOps: Same direction\n\nWe have refined the decisions during the implementation of the deployment process. The first iteration attempted to work without container images. This resulted in having many different ways to distribute and install software. We decided to take one step back and use container images to build the web application as package. The GitLab container registry works as package repository. The container image is pulled and run on the deployed host.\n\nAWS provides Aurora RDS as serverless PostgreSQL database service. We decided to use an existing service in the first iteration, and evaluate database instance management in the future. Terraform as deployment provisioner allows us to build on the foundation from our [Infrastructure as Code integration](https://docs.gitlab.com/ee/user/infrastructure/). The first apps are written in Ruby on Rails and [Python](https://gitlab.com/gitlab-de/5-min-prod-app-python-web), we are planning with more to come soon.\n\n![GitLab CI/CD pipeline deployment](https://about.gitlab.com/images/blogimages/5-min-prod-app/gitlab_cicd_pipeline_deployed.png){: .shadow.medium.center}\n\n![AWS EC2 view](https://about.gitlab.com/images/blogimages/5-min-prod-app/aws_ec2_view.png){: .shadow.medium.center}\n\nOur vision for the 5 minute production app flow:\n\n1. Go to GitLab.com.\n2. Sign in with your AWS account.\n3. New Project.\n4. Rails/Node/etc. template.\n5. Write some code and create a merge request.\n6. Get a review app and test results in the MR.\n7. Merge the MR.\n8. Automatically deployed to production.\n9. Share URL of production app with a friend.\n10. 
Production app has a persistent state and can reset passwords via email (DB, s3, redis, mail) and provides the full Auto DevOps features (Monitoring, etc.).\n11. No manual steps for setting up DB, s3, redis, mail. Terraform takes care of automated setup.\n12. All within AWS in the free tier.\n13. No command line or terminal required, everything accessible in the GitLab UI.\n\n### What comes next\n\nThe next iterations include more scenarios and questions:\n\n- Domain and SSL support\n- Review environments and rollbacks\n- Python web application with database migrations\n- NodeJS app with a PostgreSQL backend\n- Support for more cloud providers and local deployments\n- Decoupled database server management\n\nThe deployment template will soon be [merged into GitLab Core](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/49487). This is great news for everyone joining us for feedback and tests. Let us know what you think, and follow our progress with these resources:\n\n- [Issue Board](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/boards)\n- [Recordings on YouTube](https://www.youtube.com/playlist?list=PL05JrBw4t0Krf0LZbfg80yo08DW1c3C36)\n- [Deploy Template project](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template)\n\nCover image by [Nicolas J Leclercq](https://unsplash.com/@nicolasjleclercq?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/speed-snow)\n",[721,9,269],{"slug":1085,"featured":6,"template":701},"first-code-to-ci-cd-deployments-in-5-minutes","content:en-us:blog:first-code-to-ci-cd-deployments-in-5-minutes.yml","First Code To Ci Cd Deployments In 5 
Minutes","en-us/blog/first-code-to-ci-cd-deployments-in-5-minutes.yml","en-us/blog/first-code-to-ci-cd-deployments-in-5-minutes",{"_path":1091,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1092,"content":1098,"config":1105,"_id":1107,"_type":14,"title":1108,"_source":16,"_file":1109,"_stem":1110,"_extension":19},"/en-us/blog/five-ways-to-streamline-cloud-adoption",{"title":1093,"description":1094,"ogTitle":1093,"ogDescription":1094,"noIndex":6,"ogImage":1095,"ogUrl":1096,"ogSiteName":686,"ogType":687,"canonicalUrls":1096,"schema":1097},"5 ways to streamline your cloud adoption","As companies migrate to the cloud, consider these helpful tips for making the move smoother and more efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663930/Blog/Hero%20Images/daytime-clouds_1800x945.png","https://about.gitlab.com/blog/five-ways-to-streamline-cloud-adoption","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 ways to streamline your cloud adoption\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2023-09-05\",\n      }",{"title":1093,"description":1094,"authors":1099,"heroImage":1095,"date":1101,"body":1102,"category":1103,"tags":1104},[1100],"Sharon Gaudin","2023-09-05","\nMoving to the cloud makes sense to a lot of companies — it’s getting there that can be difficult.\n\n[GitLab’s 2023 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/) showed that migrating to the cloud can help organizations release software faster: Respondents who were running at least 25% of their applications in the cloud were twice as likely to release software faster than they were a year ago.\n\nHowever, the migration, whether to a single-cloud service or a multi-cloud environment, can be a big lift. 
IT teams are tasked with securing major data stores and workloads, navigating the complexities of moving legacy applications, and ensuring that cloud environments comply with applicable data regulations and laws. It can be complicated, with a lot of moving pieces that are often difficult to track.\n\nAnd the longer a migration drags on, the more things can go wrong and the more expensive it can get. It only makes sense to look for a way to make something so critical to the business easier, faster, and less expensive.\n\nAbubakar Siddiq Ango, developer evangelism program manager at GitLab, and Fatima Sarah Khalid, developer evangelist at GitLab, share five ways organizations can alleviate some of the time-consuming, repetitive, and arduous tasks it takes to successfully make that move.\n\n## 1. Take care of your data\nOne of the most difficult parts of a cloud migration is moving the data itself – especially if it’s complex and stored across multiple systems – but there are a few ways you can organize and streamline the tasks involved to make them more straightforward. For example, to save time and increase efficiency, Khalid notes that team members can create [issues](https://docs.gitlab.com/ee/user/project/issues/), break tasks down into [milestones](/blog/tackle-nists-plan-of-action-and-milestones-with-gitlabs-risk-management-features/), and use the [Roadmap](https://docs.gitlab.com/ee/user/group/roadmap/) feature, which gives teams a more granular view of their workflow.\n\n## 2. Avoid security pitfalls\n[Security](/blog/its-time-to-put-the-sec-in-devsecops/) should be a key consideration in any cloud migration. Moving to a cloud environment can inadvertently cause misconfigured servers, unsecure APIs, compliance infringements, and data loss. 
Any of these problems can trip up cloud migration efforts and expose the company to risk.\n\nTo ensure the move to the cloud proceeds smoothly while minimizing security risks, Ango says teams can use [container](https://docs.gitlab.com/ee/user/application_security/container_scanning/) and [dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) and [static application security testing](https://docs.gitlab.com/ee/user/application_security/sast/) (SAST) to identify and remediate known vulnerabilities in container images, dependencies, and source code. Teams also can use features such as [code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html) analysis to supplement existing code review processes and ensure that the project’s code is simple, high-quality, and straightforward to maintain — and, therefore, less likely to cause issues during the migration.\n\n## 3. Automate compliance\n[Compliance](/blog/top-5-compliance-features-to-leverage-in-gitlab/) is another critical issue. IT teams need to ensure the new cloud environment continues to meet all of the organization's regulatory requirements — a potentially large number of standards. That means making sure processes and safeguards focused on data protection are in place and cover the information and applications being moved to the cloud. Manually, that can involve spreadsheets, seemingly endless checklists, and cross-functional teams of people culling through data. Automation makes this more streamlined, requires far fewer people to navigate the process, and is simpler to manage. Automated DevOps practices, like security scanning, [policy automation](/solutions/compliance/), and making compliance standards part of the CI/CD pipeline, all act as guardrails to [keep an organization’s compliance needs on track](/blog/the-importance-of-compliance-in-devops/). 
With these tools at hand, team members can trust that when they create compliance frameworks and policies, the associated rules will be automatically deployed and enforced throughout the software development lifecycle.\n\n## 4. Relieve configuration challenges\nSetting up and configuring a cloud platform can be a time-consuming and complicated job, but [CI/CD capabilities](/blog/introducing-ci-components/) help automate the configuration process, says Ango. With CI templates, teams can build and deploy applications to different cloud providers or installation targets without having to write their own CI script every time. For instance, [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/), a collection of pre-configured features and integrations, uses CI/CD templates to handle deployments on each different cloud environment.\n\nThe [GitLab agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/install/) also can offer integration capabilities for different cloud providers and services. The agent, which helps set up GitOps, automatically deploys workloads to Kubernetes clusters. Any time new changes are made, it pulls them in and deploys them into a cluster.\nAlso, teams can use [GitLab and Terraform for infrastructure as code](https://docs.gitlab.com/ee/user/infrastructure/iac/), removing the complexities of making configuration changes repeatable, traceable, and more scalable, which is essential for cloud environments.\n\n## 5. Go multi-cloud\nWhile some companies are making initial moves to the cloud, others are expanding from a single cloud to a multi-cloud environment. This strategy enables organizations to run different workloads on different cloud platforms. Being cloud agnostic means they can use the same development tools and internal processes, and then choose where they want to have their workloads run based on their business needs. 
Problems can arise, though, when IT teams turn to vendor-locked, cloud native developer tools, which are tailored to their own services and might, or might not, support other cloud environments. Using different tools for each cloud platform isn’t efficient, so it’s key to find tools that are cloud or provider agnostic.\n\n## Uncomplicate cloud migration with a DevSecOps platform\nYes, there are different ways to ease a cloud migration – but do teams have to go out and round up a dozen different tools to ensure their migration is fast, secure, and compliant? No, they don't.\n\n“A lot of teams are realizing that having a single, unified place to simplify, automate, and manage the process of setting up or migrating to the cloud is a game changer,” says Khalid. “With an end-to-end [DevSecOps platform](/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards/), users are able to deploy to any of the common public clouds; support collaboration through features like merge requests, code reviews, and issue tracking; support integrations with a variety of third-party tools; and have built-in security features that allow teams to meet their needs.”\n\nTaking advantage of the GitLab DevSecOps Platform can uncomplicate a lot of those adoption challenges. And GitLab works with any cloud provider.\n\n“I know when people think about the GitLab platform, they focus on security, source code management, and [collaboration](/blog/5-ways-collaboration-boosts-productivity-and-your-career/). But we also really should be thinking about how it’s a tool that helps organizations get their [workload to the cloud](/blog/shifting-from-on-prem-to-cloud/),” says Ango. “You have to be able to work fast, move fast and deploy fast on whatever cloud environment you need, and do it all securely. That is what GitLab offers. 
That is a big deal.”\n\n_To find the features — all in one place — that your organization needs to ease and speed a cloud migration, check out this [free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/devsecops/)._\n","devsecops",[789,1001,721,9],{"slug":1106,"featured":6,"template":701},"five-ways-to-streamline-cloud-adoption","content:en-us:blog:five-ways-to-streamline-cloud-adoption.yml","Five Ways To Streamline Cloud Adoption","en-us/blog/five-ways-to-streamline-cloud-adoption.yml","en-us/blog/five-ways-to-streamline-cloud-adoption",{"_path":1112,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1113,"content":1119,"config":1126,"_id":1128,"_type":14,"title":1129,"_source":16,"_file":1130,"_stem":1131,"_extension":19},"/en-us/blog/forrester-cdra2020",{"title":1114,"description":1115,"ogTitle":1114,"ogDescription":1115,"noIndex":6,"ogImage":1116,"ogUrl":1117,"ogSiteName":686,"ogType":687,"canonicalUrls":1117,"schema":1118},"GitLab and The Forrester Wave: CD and release automation","GitLab named a Strong Performer in Forrester Wave for Continuous Delivery and Release Automation","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666262/Blog/Hero%20Images/default-blog-image.png","https://about.gitlab.com/blog/forrester-cdra2020","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and The Forrester Wave: Continuous Delivery and Release Automation Q2 2020\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Parker Ennis\"}],\n        \"datePublished\": \"2020-07-08\",\n      }",{"title":1120,"description":1115,"authors":1121,"heroImage":1116,"date":1123,"body":1124,"category":808,"tags":1125},"GitLab and The Forrester Wave: Continuous Delivery and Release Automation Q2 2020",[1122],"Parker Ennis","2020-07-08","\nHere at [GitLab](/company/), we're fundamentally changing the way that organizations develop and deploy their software by offering a 
complete [DevOps platform](/solutions/devops-platform/) delivered as a single application. Excitingly enough, GitLab was recently cited as a **Strong Performer** in the Forrester Continuous Delivery and Release Automation (CDRA) report for Q2 2020. For this CDRA wave, Forrester evaluated a wide range of vendors to see how their CDRA capabilities stack up in relation to each other and the market at large. Forrester’s evaluation specifically ranks the strengths and weaknesses based on the capabilities of each vendor’s current offering(s), their product vision/strategy going forward, and their market presence to provide an in-depth analysis that companies can use to make the right decisions when it comes to choosing the best CDRA solution for them.\n\nFourteen vendors were interviewed, researched, and analyzed for this report against 26 types of criteria.\n\nFor those interested, you can access this report directly from Forrester on our [commentary page](/analysts/forrester-cdra20/).\n\n#### **A Little Background**\n\n[Continuous delivery](/topics/continuous-delivery/) and release automation is an area that’s quickly evolving to meet the needs of the market. This puts an immense amount of pressure on vendors to innovate just as rapidly in order to not only compete effectively, but to provide customers with the best possible solution and experience. Fortunately, we’re excited to be moving in the right direction and continuously improving our CDRA capabilities at GitLab. 
Since 2018, we've made significant investments to add new functionality, improve existing capabilities, and bring both our [continuous delivery direction](/direction/release/continuous_delivery/) and [release orchestration](/direction/release/release_orchestration/) visions to life.\n\n#### **Why is CDRA important?**\n\nAs technology and software development continues to advance at this feverish pace, all businesses, not just vendors, are feeling the pressure more than ever to modernize how they build, test, and deploy their applications. You've probably heard \"every company is a software company\" before and that's exactly what it means.\n\nPrioritizing automation over manual development work is at the center of these transformation efforts. Why's this important? Because how a vendor fares in the realm of CDRA has a direct correlation to the strength and maturity of their [CI/CD capabilities](/topics/ci-cd/). As a result, reports like this CDRA wave act as one of many solid indicators for a vendor's ability to help businesses achieve their goals and automate their software development processes. Naturally, CDRA focuses heavily on the release/deployment automation portion of the software delivery lifecycle, but you can't automate deployments without having a solid CI implementation to automate builds/tests first. 
Taking that into account, an evaluation like this can go a long way in determining whether a given vendor's solution is not only right for your business today, but where you want your business to be in the future.\n\n#### **Key takeaways in the publication**\n\nHere’s what Forrester determined to be the key takeaways for this Wave:\n\n**CloudBees, IBM, Microsoft, Digital.ai, Broadcom, And Flexagon Lead The Pack**\n\n**Visualizations Of Complex Application And Deployment Models Are Key Differentiators**\n\nAs the continuous delivery market continues to consolidate, and with upstream continuous integration capabilities and higher-order management becoming the norm, vendors are competing increasingly on breadth of functionality. The ability to visualize complex application and deployment models continues to be a differentiator, as does the management of deployment outcomes. Appropriate use of advanced analytics and machine learning is also a key factor, with continuing vendor investment resulting in valuable capabilities such as improved release readiness.\n\n#### **Our highlights**\n\n**GitLab is among vendors with highest score** for these categories:\n\n*   Build automation/continuous integration\n*   Deployment and operations\n*   CDRA vision and value proposition\n*   Product innovation\n*   Market approach and viability\n\n**Forrester’s profile of GitLab:**\n\n**“GitLab is expanding its comprehensive platform quickly.**\n\n“GitLab emerged from the continuous integration side of the market and, with its foundation in source control, has strong headwaters capabilities. GitLab supports continuous integration and deployment to cloud-native platforms, but support for legacy platforms is lacking. More recently, the company has added continuous delivery features, including continuous integration and deployment for Kubernetes. The product bases its application modeling on Helm charts, thus requiring Kubernetes to function. 
The firm grounds its strategy in a very active open source community and a clear ability to execute on this business model.
Until next time!\n\n_If you have any questions or would like to contact us about this report, you can reach us [here](/company/contact/)._\n",[9,765,808],{"slug":1127,"featured":6,"template":701},"forrester-cdra2020","content:en-us:blog:forrester-cdra2020.yml","Forrester Cdra2020","en-us/blog/forrester-cdra2020.yml","en-us/blog/forrester-cdra2020",{"_path":1133,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1134,"content":1140,"config":1148,"_id":1150,"_type":14,"title":1151,"_source":16,"_file":1152,"_stem":1153,"_extension":19},"/en-us/blog/from-code-to-production-a-guide-to-continuous-deployment-with-gitlab",{"title":1135,"description":1136,"ogTitle":1135,"ogDescription":1136,"noIndex":6,"ogImage":1137,"ogUrl":1138,"ogSiteName":686,"ogType":687,"canonicalUrls":1138,"schema":1139},"From code to production: A guide to continuous deployment with GitLab","Learn how to get started building a robust continuous deployment pipeline in GitLab. Follow these step-by-step instructions, practical examples, and best practices.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659478/Blog/Hero%20Images/REFERENCE_-_Use_this_page_as_a_reference_for_thumbnail_sizes.png","https://about.gitlab.com/blog/from-code-to-production-a-guide-to-continuous-deployment-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"From code to production: A guide to continuous deployment with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Benjamin Skierlak\"},{\"@type\":\"Person\",\"name\":\"James Wormwell\"}],\n        \"datePublished\": \"2025-01-28\",\n      }",{"title":1135,"description":1136,"authors":1141,"heroImage":1137,"date":1144,"body":1145,"category":1146,"tags":1147},[1142,1143],"Benjamin Skierlak","James Wormwell","2025-01-28","Continuous deployment is a game-changing practice that enables teams to\ndeliver value faster, with higher confidence. 
However, diving into advanced\ndeployment workflows — such as GitOps, container orchestration with\nKubernetes, or dynamic environments — can be intimidating for teams just\nstarting out.\n\n\nAt GitLab, we're committed to making delivery seamless and scalable. By\nenabling teams to focus on the fundamentals, we empower them to build a\nstrong foundation that supports growth into more complex strategies over\ntime. This guide provides essential steps to begin implementing continuous\ndeployment with GitLab, laying the foundation for your long-term success.\n\n\n## Start with a workflow plan\n\n\nBefore diving into the technical implementation, take time to map out your\ndeployment workflow. Success lies in careful planning and a methodical\napproach.\n\n\n### Artifact management strategy\n\n\nIn the context of continuous deployment, artifacts are the packaged outputs\nof your build process that need to be stored, versioned, and deployed. These\ncould be:\n\n\n- container images for your applications\n\n- packages\n\n- compiled binaries or executables\n\n- libraries\n\n- configuration files\n\n- documentation packages\n\n- other artifacts\n\n\nEach type of artifact plays a specific role in your deployment process. For\nexample, a typical web application might generate:\n\n\n- a container image for the backend service\n\n- a ZIP archive of compiled frontend assets\n\n- SQL files for database changes\n\n- environment-specific configuration files\n\n\nManaging these artifacts effectively is crucial for successful deployments.\nHere's how to approach artifact management.\n\n\n#### Artifacts and releases versioning strategies\n\n\nA best practice to get you started with a clean structure is to establish a\nclear versioning strategy for your artifacts. 
When creating releases:\n\n\n- Use semantic versioning (major.minor.patch) for release tags\n  - Example: `myapp:1.2.3` for a stable release\n  - Major version changes (2.0.0) for breaking changes\n  - Minor version changes (1.3.0) for new features\n  - Patch version changes (1.2.4) for bug fixes\n- Maintain a 'latest' tag for the most recent stable version\n  - Example: `myapp:latest` for automated deployments\n- Include commit SHA for precise version tracking\n  - Example: `myapp:1.2.3-abc123f` for debugging\n- Consider branch-based tags for development environments\n  - Example: `myapp:feature-user-auth` for feature testing\n\n#### Build artifacts retention\n\n\nImplement defined retention rules:\n\n\n- Set explicit expiration timeframes for temporary artifacts\n\n- Define which artifacts need permanent retention\n\n- Configure cleanup policies to manage storage\n\n\n#### Registry access and authentication\n\n\nSecure your artifacts with proper access controls:\n\n\n- Implement Personal Access Tokens for developer access\n\n- Configure CI/CD variables for pipeline authentication\n\n- Set up proper access scopes\n\n\n### Environment strategy\n\n\nConsider your environments early, as they shape your entire deployment\npipeline:\n\n\n- Development, staging, and production environment configurations\n\n- Environment-specific variables and secrets\n\n- Access controls and protection rules\n\n- Deployment tracking and monitoring approach\n\n\n### Deployment targets\n\n\nBe intentional as to where and how you'll deploy; these decisions matter, and\nthe benefits and drawbacks of each should be considered:\n\n\n- Infrastructure requirements (VMs, containers, cloud services)\n\n- Network access and security configurations\n\n- Authentication mechanisms (SSH keys, access tokens)\n\n- Resource allocation and scaling considerations\n\n\nWith our strategy defined and foundational decisions made, we can now\ntranslate these plans into a working pipeline. 
We'll build a practical\nexample that demonstrates these concepts, starting with a simple application\nand progressively adding deployment capabilities.\n\n\n## Implementing your CD pipeline\n\n\n### A step-by-step example\n\n\nLet's walk through implementing a basic continuous deployment pipeline for a\nweb application. We'll use a simple HTML application as an example, but\nthese principles apply to any type of application. We’re also going to\ndeploy our application as a Docker image on a simple virtual machine. This\nwill allow us to lean on a curated image with minimum dependencies, and to\nensure no environment specific requirements are unintentionally brought in.\nBy working on a virtual machine, we won’t be leveraging GitLab’s native\nintegrations, allowing us to work on an easier but less scalable setup to\nbegin with.\n\n\n#### Prerequisites\n\n\nIn this example, we’ll aim to containerize an application that we’ll run on\na virtual machine hosted on a cloud provider. We’ll also test this\napplication locally on our machine. 
This list of prerequisites is only\nneeded for this scenario.\n\n\n##### Virtual machine setup\n\n\n- Provision a VM in your preferred cloud provider (e.g., GCP, AWS, Azure)\n\n- Configure network rules to allow access on ports 22, 80, and 443\n\n- Record the machine's public IP address for deployment\n\n\n##### Set up SSH authentication:\n\n\n- Generate a public/private key pair for the machine\n\n- In GitLab, go to **Settings > CI/CD > Variables**\n\n- Create a variable called `GITLAB_KEY`\n\n- Set Type to \"File\" (required for SSH authentication)\n\n- Paste the private key in the Value field\n\n- Define a USER variable, this is the user logging in and running the\nscripts on your VM\n\n\n##### Configure deployment variables\n\n\n- Create variables for your deployment targets:\n  - `STAGING_TARGET`: Your staging server IP/domain\n  - `PRODUCTION_TARGET`: Your production server IP/domain\n\n##### Local development setup\n\n\n- Install Docker on your local machine for testing deployments\n\n\n##### GitLab Container Registry access\n\n\n- Locate your registry path:\n  - Navigate to **Deploy > Container Registry**\n  - Copy the registry path (e.g., registry.gitlab.com/group/project)\n- Set up authentication:\n  - Go to **Settings > Access Tokens**\n  - Create a new token with registry access\n  - Token expiration: Maximum 1 year\n  - Save the token securely\n- Configure local registry access:\n\n\n```\n\ndocker login registry.gitlab.com\n\n# The username if you are using a PAT is gitlab-ci-token\n\n# Password: your-access-token\n\n```\n\n\n#### 1. Create your application\n\n\nStart with a basic web application. For our example, we're using a simple\nHTML page:\n\n\n```\n\n\u003C!-- index.html -->\n\n\u003Chtml>\n  \u003Chead>\n    \u003Cstyle>\n      body {\n        background-color: #171321; /* GitLab dark */\n      }\n    \u003C/style>\n  \u003C/head>\n  \u003Cbody>\n    \u003C!-- Your content here -->\n  \u003C/body>\n\u003C/html>\n\n```\n\n\n#### 2. 
Containerize your application\n\n\nCreate a Dockerfile to package your application:\n\n\n```\n\nFROM nginx:1.26.2\n\nCOPY index.html /usr/share/nginx/html/index.html\n\n```\n\n\nThis Dockerfile:\n\n\n- Uses nginx as a base image for serving web content\n\n- Copies your HTML file to the correct location in the nginx directory\nstructure\n\n\n#### 3. Set up your CI/CD pipeline\n\n\nCreate a `.gitlab-ci.yml` file to define your pipeline stages:\n\n\n```\n\nvariables:\n  TAG_LATEST: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_NAME:latest\n  TAG_COMMIT: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_NAME:$CI_COMMIT_SHA\n\nstages:\n  - publish\n  - deploy\n```\n\n\nLet's break it down:\n\n\n`TAG_LATEST` is made up of three parts:\n\n\n- `$CI_REGISTRY_IMAGE` is the path to your project's container registry in\nGitLab\n\n\nFor example: `registry.gitlab.com/your-group/your-project`\n\n\n- `$CI_COMMIT_REF_NAME` is the name of your branch or tag\n\n\nFor example, if you're on main branch: `/main`, and if you're on a feature\nbranch: `/feature-login`\n\n\n- `:latest` is a fixed suffix\n\n\nSo if you're on the main branch, `TAG_LATEST` becomes:\n`registry.gitlab.com/your-group/your-project/main:latest`.\n\n\n`TAG_COMMIT` is almost identical, but instead of `:latest`, it uses:\n`$CI_COMMIT_SHA` which is the commit identifier, for example:\n`:abc123def456`.\n\n\nSo for that same commit on main branch, `TAG_COMMIT` becomes:`\nregistry.gitlab.com/your-group/your-project/main:abc123def456`.\n\n\nThe reason for having both is `TAG_LATEST` gives you an easy way to always\nget the newest version, and `TAG_COMMIT` gives you a specific version you\ncan return to if needed.\n\n\n#### 4. 
Publish to the container registry\n\n\nAdd the publish job to your pipeline:\n\n\n```\n\npublish:\n  stage: publish\n  image: docker:latest\n  services:\n    - docker:dind\n  script:\n    - docker build -t $TAG_LATEST -t $TAG_COMMIT .\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker push $TAG_LATEST\n    - docker push $TAG_COMMIT\n```\n\n\nThis job:\n\n\n- Uses Docker-in-Docker to build images\n\n- Creates two tagged versions of your image\n\n- Authenticates with the GitLab registry\n\n- Pushes both versions to the registry \n\n\nNow that our images are safely stored in the registry, we can focus on\ndeploying them to our target environments. Let's start with local testing to\nvalidate our setup before moving to production deployments.\n\n\n#### 5. Deploy to your environment\n\n\nBefore deploying to production, you can test locally. We just published our\nimage to the GitLab repository, which we’ll pull locally. If you’re unsure\nof the exact path, navigate to **Deploy > Container Registry**, and you\nshould see an icon to copy the path of your image at the end of the line for\nthe container image you want to test.\n\n\n```\n\ndocker login registry.gitlab.com \n\ndocker run -p 80:80 registry.gitlab.com/your-project-path/main:latest\n\n```\n\n\nBy doing so you should be able to access your application locally on your\nlocalhost address through your web browser.\n\n\nYou can now add a deployment job to your pipeline:\n\n\n```\n\ndeploy:\n  stage: deploy\n  image: alpine:latest\n  script:\n    - chmod 400 $GITLAB_KEY\n    - apk add openssh-client\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - ssh -i $GITLAB_KEY -o StrictHostKeyChecking=no $USER@$TARGET_SERVER \n      docker pull $TAG_COMMIT &&\n      docker rm -f myapp || true &&\n      docker run -d -p 80:80 --name myapp $TAG_COMMIT\n```\n\n\nThis job:\n\n\n- Sets up SSH access to your deployment target\n\n- Pulls the latest image\n\n- 
Removes any existing container\n\n- Deploys the new version\n\n\n#### 6. Track deployments\n\n\nEnable deployment tracking by adding environment configuration:\n\n\n```\n\ndeploy:\n  environment:\n    name: production\n    url: https://your-application-url.com \n```\n\n\nThis creates an environment object in GitLab's **Operate > Environments**\nsection, providing:\n\n\n- Deployment history\n\n- Current deployment status\n\n- Quick access to your application\n\n\nWhile a single environment pipeline is a good starting point, most teams\nneed to manage multiple environments for proper testing and staging. Let's\nexpand our pipeline to handle this more realistic scenario.\n\n\n#### 7. Set up multiple environments\n\n\nFor a more robust pipeline, configure staging and production deployments:\n\n\n```\n\nstages:\n  - publish\n  - staging\n  - release\n  - version\n  - production\n\nstaging:\n  stage: staging\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\" && $CI_COMMIT_TAG == null\n  environment:\n    name: staging\n    url: https://staging.your-app.com\n  # deployment script here\n\nproduction:\n  stage: production\n  rules:\n    - if: $CI_COMMIT_TAG\n  environment:\n    name: production\n    url: https://your-app.com\n  # deployment script here\n```\n\n\nThis setup:\n\n\n- Deploys to staging from your main branch\n\n- Uses GitLab tags to trigger production deployments\n\n- Provides separate tracking for each environment\n\n\nHere and in our next step, we’re leveraging a very useful GitLab feature:\ntags. By manually creating a tag in the **Code > Tags** section, the\n`$CI_COMMIT_TAG` gets created, which allows us to trigger jobs accordingly.\n\n\n#### 8. 
Create automated release notes\n\n\nWe'll be using GitLab's release capabilities through our CI/CD pipeline.\nFirst, update your stages in `.gitlab-ci.yml`:\n\n\n```\n\nstages:\n\n\n- publish\n\n- staging\n\n- release # New stage for releases\n\n- version\n\n- production\n\n```\n\n\nNext, add the release job:\n\n\n```\n\nrelease_job:\n  stage: release\n  image: registry.gitlab.com/gitlab-org/release-cli:latest\n  rules:\n    - if: $CI_COMMIT_TAG                  # Only run when a tag is created\n  script:\n    - echo \"Creating release for $CI_COMMIT_TAG\"\n  release:                                # Release configuration\n    name: 'Release $CI_COMMIT_TAG'\n    description: 'Release created from $CI_COMMIT_TAG'\n    tag_name: '$CI_COMMIT_TAG'           # The tag to create\n    ref: '$CI_COMMIT_TAG'                # The tag to base release on\n```\n\n\nYou can enhance this by adding links to your container images:\n\n\n```\n\nrelease:\n  name: 'Release $CI_COMMIT_TAG'\n  description: 'Release created from $CI_COMMIT_TAG'\n  tag_name: '$CI_COMMIT_TAG'\n  ref: '$CI_COMMIT_TAG'\n  assets:\n    links:\n      - name: 'Container Image'\n        url: '$CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG'\n        link_type: 'image'\n```\n\n\nFor meaningful automated release notes:\n\n\n- Use conventional commits (feat:, fix:, etc.)\n\n- Include issue numbers (#123)\n\n- Separate subject from body with blank line\n\n\nIf you want custom release notes with deployment info:\n\n\n```\n\nrelease_job:\n  script:\n    - |\n      DEPLOY_TIME=$(date '+%Y-%m-%d %H:%M:%S')\n      CHANGES=$(git log $(git describe --tags --abbrev=0 @^)..@ --pretty=format:\"- %s\")\n      cat > release_notes.md \u003C\u003C EOF\n      ## Deployment Info\n      - Deployed on: $DEPLOY_TIME\n      - Environment: Production\n      - Version: $CI_COMMIT_TAG\n\n      ## Changes\n      $CHANGES\n\n      ## Artifacts\n      - Container Image: \\`$CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG\\`\n      EOF\n  release:\n    
description: './release_notes.md'\n```\n\n\nOnce configured, releases will be created automatically when you create a\nGit tag. You can view them in GitLab under **Deploy > Releases**.\n\n\n#### 9. Put it all together\n\n\nThis is what our final YAML file looks like:\n\n\n```\n\nvariables:\n  TAG_LATEST: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_NAME:latest\n  TAG_COMMIT: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_NAME:$CI_COMMIT_SHA\n  STAGING_TARGET: $STAGING_TARGET    # Set in CI/CD Variables\n  PRODUCTION_TARGET: $PRODUCTION_TARGET  # Set in CI/CD Variables\n\nstages:\n  - publish\n  - staging\n  - release\n  - version\n  - production\n\n# Build and publish to registry\n\npublish:\n  stage: publish\n  image: docker:latest\n  services:\n    - docker:dind\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\" && $CI_COMMIT_TAG == null\n  script:\n    - docker build -t $TAG_LATEST -t $TAG_COMMIT .\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker push $TAG_LATEST\n    - docker push $TAG_COMMIT\n\n# Deploy to staging\n\nstaging:\n  stage: staging\n  image: alpine:latest\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\" && $CI_COMMIT_TAG == null\n  script:\n    - chmod 400 $GITLAB_KEY\n    - apk add openssh-client\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - ssh -i $GITLAB_KEY -o StrictHostKeyChecking=no $USER@$STAGING_TARGET \"\n        docker pull $TAG_COMMIT &&\n        docker rm -f myapp || true &&\n        docker run -d -p 80:80 --name myapp $TAG_COMMIT\"\n  environment:\n    name: staging\n    url: http://$STAGING_TARGET\n\n# Create release\n\nrelease_job:\n  stage: release\n  image: registry.gitlab.com/gitlab-org/release-cli:latest\n  rules:\n    - if: $CI_COMMIT_TAG\n  script:\n    - |\n      DEPLOY_TIME=$(date '+%Y-%m-%d %H:%M:%S')\n      CHANGES=$(git log $(git describe --tags --abbrev=0 @^)..@ --pretty=format:\"- %s\")\n      cat > release_notes.md \u003C\u003C EOF\n      ## Deployment Info\n 
     - Deployed on: $DEPLOY_TIME\n      - Environment: Production\n      - Version: $CI_COMMIT_TAG\n\n      ## Changes\n      $CHANGES\n\n      ## Artifacts\n      - Container Image: \\`$CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG\\`\n      EOF\n  release:\n    name: 'Release $CI_COMMIT_TAG'\n    description: './release_notes.md'\n    tag_name: '$CI_COMMIT_TAG'\n    ref: '$CI_COMMIT_TAG'\n    assets:\n      links:\n        - name: 'Container Image'\n          url: '$CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG'\n          link_type: 'image'\n\n# Version the image with release tag\n\nversion_job:\n  stage: version\n  image: docker:latest\n  services:\n    - docker:dind\n  rules:\n    - if: $CI_COMMIT_TAG\n  script:\n    - docker pull $TAG_COMMIT\n    - docker tag $TAG_COMMIT $CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker push $CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG\n\n# Deploy to production\n\nproduction:\n  stage: production\n  image: alpine:latest\n  rules:\n    - if: $CI_COMMIT_TAG\n  script:\n    - chmod 400 $GITLAB_KEY\n    - apk add openssh-client\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - ssh -i $GITLAB_KEY -o StrictHostKeyChecking=no $USER@$PRODUCTION_TARGET \"\n        docker pull $CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG &&\n        docker rm -f myapp || true &&\n        docker run -d -p 80:80 --name myapp $CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG\"\n  environment:\n    name: production\n    url: http://$PRODUCTION_TARGET\n```\n\n\nThis complete pipeline:\n\n\n- Publishes images to the registry (main branch)\n\n- Deploys to staging (main branch)\n\n- Creates releases (on tags)\n\n- Versions images with release tags\n\n- Deploys to production (on tags)\n\n\nKey benefits:\n\n\n- Clean reproducible, local development and testing environment\n\n- Clear path to production environments with structure to build confidence\nin what is deployed\n\n- Pattern to 
recover from unexpected failures, etc.\n\n- Ready to scale/adopt more complex deployment strategies\n\n\n### Best practices\n\n\nThroughout implementation, maintain these principles:\n\n\n- Document everything, from variable usage to deployment procedures\n\n- Use GitLab's built-in features (environments, releases, registry)\n\n- Implement proper access controls and security measures\n\n- Plan for failure with robust rollback procedures\n\n- Keep your pipeline configurations DRY (Don't Repeat Yourself)\n\n\n## Scale your deployment strategy\n\n\nWhat next? Here are some aspects to consider as your continuous deployment\nstrategy matures.\n\n\n### Advanced security measures\n\n\nEnhance security through:\n\n\n- Protected environments with restricted access\n\n- Required approvals for production deployments\n\n- Integrated security scanning\n\n- Automated vulnerability assessments\n\n- Branch protection rules for deployment-related changes\n\n\n### Progressive delivery strategies\n\n\nImplement advanced deployment strategies:\n\n\n- Feature flags for controlled rollouts\n\n- Canary deployments for risk mitigation\n\n- Blue-green deployment strategies\n\n- A/B testing capabilities\n\n- Dynamic environment management\n\n\n### Monitoring and optimization\n\n\nEstablish robust monitoring practices:\n\n\n- Track deployment metrics\n\n- Set up performance monitoring\n\n- Configure deployment alerts\n\n- Establish deployment SLOs\n\n- Regular pipeline optimization\n\n\n## Why GitLab?\n\n\nGitLab's continuous deployment capabilities make it a standout choice for\nmodern deployment workflows. The platform excels in streamlining the path\nfrom code to production, offering built-in container registry, environment\nmanagement, and deployment tracking all within a single interface. 
GitLab's\nenvironment-specific variables, deployment approval gates, and rollback\ncapabilities provide the security and control needed for production\ndeployments, while features like review apps and feature flags enable\nprogressive delivery approaches. As part of GitLab's complete DevSecOps\nplatform, these CD capabilities seamlessly integrate with your entire\nsoftware lifecycle.\n\n\n## Get started today\n\n\nThe journey to continuous deployment is an evolution, not a revolution.\nStart with the fundamentals, build a solid foundation, and gradually\nincorporate advanced features as your team's needs grow. GitLab provides the\ntools and flexibility to support you at every stage of this journey, from\nyour first automated deployment to complex, multi-environment delivery\npipelines.\n\n\n> Sign up for a [free trial of GitLab\nUltimate](https://about.gitlab.com/free-trial/devsecops/) to get started\nwith continuous deployment today.\n","product",[9,109,917,1146,766],{"slug":1149,"featured":6,"template":701},"from-code-to-production-a-guide-to-continuous-deployment-with-gitlab","content:en-us:blog:from-code-to-production-a-guide-to-continuous-deployment-with-gitlab.yml","From Code To Production A Guide To Continuous Deployment With Gitlab","en-us/blog/from-code-to-production-a-guide-to-continuous-deployment-with-gitlab.yml","en-us/blog/from-code-to-production-a-guide-to-continuous-deployment-with-gitlab",{"_path":1155,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1156,"content":1162,"config":1169,"_id":1171,"_type":14,"title":1172,"_source":16,"_file":1173,"_stem":1174,"_extension":19},"/en-us/blog/getting-started-with-gitlab-application-security",{"title":1157,"description":1158,"ogTitle":1157,"ogDescription":1158,"noIndex":6,"ogImage":1159,"ogUrl":1160,"ogSiteName":686,"ogType":687,"canonicalUrls":1160,"schema":1161},"Getting started with GitLab application security","This tutorial shows how to incorporate GitLab security scan templates into a 
.gitlab-ci.yml file and view scan results.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663993/Blog/Hero%20Images/2018-developer-report-cover.jpg","https://about.gitlab.com/blog/getting-started-with-gitlab-application-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab application security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Victor Hernandez\"},{\"@type\":\"Person\",\"name\":\"Julie Byrne\"}],\n        \"datePublished\": \"2023-03-15\"\n      }",{"title":1157,"description":1158,"authors":1163,"heroImage":1159,"date":1166,"body":1167,"category":741,"tags":1168},[1164,1165],"Victor Hernandez","Julie Byrne","2023-03-15","As software security becomes increasingly important, many companies want to\nintroduce standard code scanning processes into development workflows to\nfind and remediate security vulnerabilities before they get to production.\nGitLab's DevSecOps Platform allows users to perform security scans in CI/CD\npipelines, which can easily be enabled to check applications for security\nvulnerabilities such as unauthorized access, data leaks, and denial of\nservice (DoS) attacks. While most of what is covered in this blog will\npertain to Ultimate features, there are some features available for free and\nPremium tier users as well. 
By the end of this blog, you will have a solid\nstarting point for adopting GitLab security scans, with any tier license,\nand understand the steps to take next to mature your DevSecOps practices.\n\n\n## Prerequisites\n\nTo enable security scanning for a project, you must have the following:\n\n- a GitLab project that meets the requirements of the security scan you\nchoose to enable, with CI enabled\n\n- a `.gitlab-ci.yml` file for the project that has at least a build job\ndefined\n\n- a Linux-based GitLab Runner with the Docker or Kubernetes executor\n\n\n## Get started: Add a scan template to your pipeline\n\n\nHere are the first steps to introduce security scanning.\n\n\n### Available security scans\n\n\nGitLab provides a variety of security scanners, each with its own set of\ncriteria for adoption:\n\n\n| Scan type | Minimum tier | Prerequisites | Application requirements |\n\n| --- | --- | --- | --- |\n\n| [Static application security testing\n(SAST)](https://docs.gitlab.com/ee/user/application_security/sast/) | Free |\nNone | See [SAST\nrequirements](https://docs.gitlab.com/ee/user/application_security/sast/index.html#requirements)\n|\n\n| [Secret\ndetection](https://docs.gitlab.com/ee/user/application_security/secret_detection/)\n| Free | None | None |\n\n| [Container\nscanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/)\n| Free | Container image built and pushed to registry | [Docker 18.09.03 or\nhigher installed on the same computer as the\nrunner](https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html#requirements);\nimage uses a [supported\ndistribution](https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html#supported-distributions)\n|\n\n| [Infrastructure as code (IaC)\nscanning](https://docs.gitlab.com/ee/user/application_security/iac_scanning/)\n|  Free | None | See [supported languages 
and\nframeworks](https://docs.gitlab.com/ee/user/application_security/iac_scanning/#supported-languages-and-frameworks)\n|\n\n| [Dependency\nscanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/)\n- includes license compliance | Ultimate | None | Application must use one\nof the [supported languages and package\nmanagers](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/index.html#supported-languages-and-package-managers)\n|\n\n| [Dynamic application security testing\n(DAST)](https://docs.gitlab.com/ee/user/application_security/dast/) |\nUltimate | [Deployed target\napplication](https://docs.gitlab.com/ee/user/application_security/dast/index.html#prerequisites)\n| See [GitLab DAST scanning\noptions](https://docs.gitlab.com/ee/user/application_security/dast/index.html#gitlab-dast)\n|\n\n| [Coverage-guided fuzz\ntesting](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/)\n| Ultimate | Instrumented version of application | See [supported fuzzing\nengines and\nlanguages](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/index.html#supported-fuzzing-engines-and-languages)\n|\n\n| [Web API fuzz\ntesting](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/)\n|  Ultimate | Deployed target application | See [supported API\ntypes](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/#enable-web-api-fuzzing)\n|\n\n\nMany customers will start with secret detection, dependency scanning, or\nSAST scanning, as they have the fewest requirements for usage.\n\n\n### Add the scanner template\n\n\nGitLab provides a [CI template for each security\nscan](https://docs.gitlab.com/ee/user/application_security/#security-scanning-without-auto-devops)\nthat can be added to your existing `.gitlab-ci.yml` file. This can be done\nby manually editing the CI file and adding the appropriate template path in\nthe templates section of the file. 
Several scanners can also be [enabled via\nthe\nUI](https://docs.gitlab.com/ee/user/application_security/sast/#configure-sast-in-the-ui),\nwhere a merge request will be created to add the appropriate scanner to the\n`.gitlab-ci.yml` file. \n\n\nI will use a simple spring boot application as an example and enable\ndependency scanning, a scanner that is popular amongst our customers, as my\nfirst security scan. Dependency scanning will find vulnerabilities in the\nlibraries I am using to build my application. My project is a Java\napplication built via Maven and includes a `pom.xml` file, so it meets the\nrequirements for dependency scanning. Since dependency scanning can be\nenabled via the UI, I'm going to take advantage of that feature here. \n\n\nFor this project, I have created a `.gitlab-ci.yml` file that contains a\nbuild and test stage and a build job. I'm using the Auto DevOps auto-build\njob, but you can define your own build job if desired. This is the starting\npipeline code in my `.gitlab-ci.yml` file:\n\n\n```\n\nimage: alpine:latest\n\n\ninclude:\n  - template: Jobs/Build.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab-foss/blob/master/lib/gitlab/ci/templates/Jobs/Build.gitlab-ci.yml\n\nstages:\n\n- build\n\n- test\n\n\n```\n\n\nTo enable dependency scanning, I'll first navigate to the **Security &\nCompliance** menu, **Configuration** sub-menu.\n\n\n![web\nidentity](https://about.gitlab.com/images/blogimages/2023-02-26-getting-started-with-gitlab-application-security/security_config.png){:\n.shadow}\n\n\nThe option to enable dependency scanning is available about halfway down the\npage. When I click `Configure with a merge request`, a branch is created and\nI am prompted to create a corresponding draft merge request. 
I'll click\n`Create Merge Request` to save the merge request.\n\n\nOnce the merge request has been created, I see that a new branch\n`set-dependency-scanning-config-1` has been created and the `.gitlab-ci.yml`\nfile has been updated with this code:\n\n\n```\n\n# You can override the included template(s) by including variable overrides\n\n# SAST customization:\nhttps://docs.gitlab.com/ee/user/application_security/sast/#customizing-the-sast-settings\n\n# Secret Detection customization:\nhttps://docs.gitlab.com/ee/user/application_security/secret_detection/#customizing-settings\n\n# Dependency Scanning customization:\nhttps://docs.gitlab.com/ee/user/application_security/dependency_scanning/#customizing-the-dependency-scanning-settings\n\n# Container Scanning customization:\nhttps://docs.gitlab.com/ee/user/application_security/container_scanning/#customizing-the-container-scanning-settings\n\n# Note that environment variables can be set in several places\n\n# See https://docs.gitlab.com/ee/ci/variables/#cicd-variable-precedence\n\nimage: alpine:latest\n\ninclude:\n\n- template: Jobs/Build.gitlab-ci.yml\n\n- template: Security/Dependency-Scanning.gitlab-ci.yml\n\nstages:\n\n- build\n\n- test\n\n\n```\n\n\nThe change kicks off a pipeline, which will now include the dependency scan.\n\n\n![web\nidentity](https://about.gitlab.com/images/blogimages/2023-02-26-getting-started-with-gitlab-application-security/dependency_job.png){:\n.shadow}\n\n\n## View results of the security scan\n\n\nFor all license tiers, you can view the results of any security scan jobs in\nthe appropriate JSON report that can be downloaded from the merge request.\n\n\n![web\nidentity](https://about.gitlab.com/images/blogimages/2023-02-26-getting-started-with-gitlab-application-security/mr_artifacts.png){:\n.shadow}\n\n\nWith GitLab Ultimate, you will also see the vulnerabilities found by the\nscan in the merge request 
widget.\n\n\n![web\nidentity](https://about.gitlab.com/images/blogimages/2023-02-26-getting-started-with-gitlab-application-security/mr_widget.png){:\n.shadow}\n\n\nAt this point, the `.gitlab-ci.yml` changes that enable security scanning\nare only available in the `set-dependency-scanning-config-1` branch. I will\nmerge them to `main` so that the changes will be included in all future\nfeature branches.\n\n\nWith GitLab Ultimate, merging to `main` will also provide the baseline\n**Vulnerability Report** for our application.  \n\n\n![web\nidentity](https://about.gitlab.com/images/blogimages/2023-02-26-getting-started-with-gitlab-application-security/vuln_report.png){:\n.shadow}\n\n\nNow, scan results presented in the merge request widget for any new merge\nrequests will only show vulnerabilities introduced by those new code changes\nin the corresponding feature branch, and not the baseline of vulnerabilities\nthat already exist on `main`.\n\n\n## Scan enforcement\n\n\nOnce you have enabled your first scans in your CI/CD pipelines, you might be\ncurious to know how you can enforce security scans, or enforce a review and\napproval when critical vulnerabilities are found in new code changes. I\nrecommend reviewing these resources that cover these topics. 
\n - For Ultimate customers: [How to ensure separation of duties and enforce compliance with GitLab](/blog/ensuring-compliance/)\n - For Premium customers: [How to action security vulnerabilities in GitLab Premium](https://about.gitlab.com/blog/actioning-security-vulnerabilities-in-gitlab-premium/)\n\nNow that you've gained comfort with security scanners as part of the GitLab\nCI/CD pipeline, check out our [Getting Started with GitLab Application\nSecurity](https://docs.gitlab.com/ee/user/application_security/get-started-security.html)\ndocumentation for recommended next steps.\n\n\n## More resources\n - [How GitLab's application security dashboard helps AppSec engineers](/blog/secure-stage-for-appsec/)\n - [Running security scans in limited connectivity and offline environments](/blog/offline-environments/)\n - [GitLab's newest continuous compliance features bolster software supply chain security](/blog/gitlabs-newest-continuous-compliance-features-bolster-software/)\n",[789,721,9,787],{"slug":1170,"featured":6,"template":701},"getting-started-with-gitlab-application-security","content:en-us:blog:getting-started-with-gitlab-application-security.yml","Getting Started With Gitlab Application Security","en-us/blog/getting-started-with-gitlab-application-security.yml","en-us/blog/getting-started-with-gitlab-application-security",{"_path":1176,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1177,"content":1183,"config":1189,"_id":1191,"_type":14,"title":1192,"_source":16,"_file":1193,"_stem":1194,"_extension":19},"/en-us/blog/getting-started-with-gitlab-understanding-ci-cd",{"title":1178,"description":1179,"ogTitle":1178,"ogDescription":1179,"noIndex":6,"ogImage":1180,"ogUrl":1181,"ogSiteName":686,"ogType":687,"canonicalUrls":1181,"schema":1182},"Getting started with GitLab: Understanding CI/CD","Learn the basics of continuous integration/continuous delivery in this beginner's guide, including what CI/CD components are and how to create 
them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659525/Blog/Hero%20Images/blog-getting-started-with-gitlab-banner-0497-option4-fy25.png","https://about.gitlab.com/blog/getting-started-with-gitlab-understanding-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab: Understanding CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2025-04-25\",\n      }",{"title":1178,"description":1179,"authors":1184,"heroImage":1180,"date":1186,"body":1187,"category":1146,"tags":1188},[1185],"GitLab","2025-04-25","*Welcome to our \"Getting started with GitLab\" series, where we help\nnewcomers get familiar with the GitLab DevSecOps platform.*\n\n\nImagine a workflow where every code change is automatically built, tested,\nand deployed to your users. That's the power of [Continuous\nIntegration/Continuous Delivery\n(CI/CD)](https://about.gitlab.com/topics/ci-cd/)! CI/CD helps you catch bugs\nearly, ensures code quality, and delivers software faster and more\nfrequently.\n\n\n### What is CI/CD?\n\n\n* **Continuous Integration** is a development practice where developers\nintegrate code changes into a shared repository frequently, preferably\nseveral times a day. Each integration is then verified by an automated build\nand test process, allowing teams to detect problems early.  \n\n* **Continuous Delivery** extends CI by automating the release pipeline,\nensuring that your code is *always* in a deployable state. You can deploy\nyour application to various environments (e.g., staging, production) with a\nsingle click or automatically.  \n\n* **Continuous Deployment** takes it a step further by automatically\ndeploying *every successful build* to production. 
This requires a high\ndegree of confidence in your automated tests and deployment process.\n\n\n### Why GitLab CI/CD?\n\n\nGitLab CI/CD is a powerful, integrated system that comes built-in with\nGitLab. It offers a seamless experience for automating your entire software\ndevelopment lifecycle. With GitLab CI/CD, you can:\n\n\n* **Automate everything:** Build, test, and deploy your applications with\nease.  \n\n* **Catch bugs early:** Detect and fix errors before they reach\nproduction.  \n\n* **Get faster feedback:** Receive immediate feedback on your code\nchanges.  \n\n* **Improve collaboration:** Work together more effectively with automated\nworkflows.  \n\n* **Accelerate delivery:** Release software faster and more frequently.  \n\n* **Reduce risk:** Minimize deployment errors and rollbacks.\n\n\n### The elements of GitLab CI/CD\n\n\n* `.gitlab-ci.yml`**:** This [YAML\nfile](https://docs.gitlab.com/ee/ci/yaml/), located in your project's root\ndirectory, defines your CI/CD pipeline, including stages, jobs, and\nrunners.  \n\n* [**GitLab Runner**](https://docs.gitlab.com/runner/)**:** This agent\nexecutes your CI/CD jobs on your infrastructure (e.g. physical machines,\nvirtual machines, Docker containers, or Kubernetes clusters).  \n\n* [**Stages**](https://docs.gitlab.com/ee/ci/yaml/#stages)**:** Stages\ndefine the order of execution for your jobs (e.g. build, test, and\ndeploy).  \n\n* [**Jobs**](https://docs.gitlab.com/ee/ci/yaml/#job-keywords)**:** Jobs are\nindividual units of work within a stage (e.g. compile code, run tests, and\ndeploy to staging).\n\n\n### Setting up GitLab CI\n\n\nGetting started with GitLab CI is simple. 
Here's a basic example of a\n`.gitlab-ci.yml` file:\n\n\n```yaml\n\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild_job:\n  stage: build\n  script:\n    - echo \"Building the application...\"\n\ntest_job:\n  stage: test\n  script:\n    - echo \"Running tests...\"\n\ndeploy_job:\n  stage: deploy\n  script:\n    - echo \"Deploying to production...\"\n  environment:\n    name: production\n\n```\n\n\nThis configuration defines three stages: \"build,\" \"test,\" and \"deploy.\" Each\nstage contains a job that executes a simple script.\n\n\n### CI/CD configuration examples\n\n\nLet's explore some more realistic examples.\n\n\n**Building and deploying a Node.js application**\n\n\nThe pipeline definition below outlines using npm to build and test a Node.js\napplication and [dpl](https://docs.gitlab.com/ci/examples/deployment/) to\ndeploy the application to Heroku. The deploy stage of the pipeline makes use\nof [GitLab CI/CD variables](https://docs.gitlab.com/ci/variables/), which\nallow developers to store sensitive information (e.g. credentials) and\nsecurely use them in CI/CD processes. In this example, an API key to deploy\nto Heroku is stored under the variable key name `$HEROKU_API_KEY` used by\nthe dpl tool.\n\n\n```yaml\n\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  image: node:latest\n  script:\n    - npm install\n    - npm run build\n\ntest:\n  stage: test\n  image: node:latest\n  script:\n    - npm run test\n\ndeploy:\n  stage: deploy\n  image: ruby:latest\n  script:\n    - gem install dpl\n    - dpl --provider=heroku --app=$HEROKU_APP_NAME --api-key=$HEROKU_API_KEY\n\n```\n\n\n**Deploying to different environments (staging and production)**\n\n\nGitLab also offers the idea of\n[Environments](https://docs.gitlab.com/ci/environments/) with CI/CD. This\nfeature allows users to track deployments from CI/CD to infrastructure\ntargets. 
In the example below, the pipeline adds stages with an environment\nproperty for a staging and production environment. While the deploy_staging\nstage will always run its script, the deploy_production stage requires\nmanual approval to prevent accidental deployment to production.  \n\n\n```yaml\n\n\nstages:\n  - build\n  - test\n  - deploy_staging\n  - deploy_production\n\nbuild:\n  # ...\n\ntest:\n  # ...\n\ndeploy_staging:\n  stage: deploy_staging\n  script:\n    - echo \"Deploying to staging...\"\n  environment:\n    name: staging\n\ndeploy_production:\n  stage: deploy_production\n  script:\n    - echo \"Deploying to production...\"\n  environment:\n    name: production\n  when: manual  # Requires manual approval\n\n```\n\n\n### GitLab Auto DevOps\n\n\n[GitLab Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/)\nsimplifies CI/CD by providing a pre-defined configuration that automatically\nbuilds, tests, and deploys your applications. It leverages best practices\nand industry standards to streamline your workflow.\n\n\nTo enable Auto DevOps:\n\n\n1. Go to your project's **Settings > CI/CD > General pipelines**.  \n\n2. Enable the **Auto DevOps** option.\n\n\nAuto DevOps automatically detects your project's language and framework and\nconfigures the necessary build, test, and deployment stages. You don’t even\nneed to create a `.gitlab-ci.yml` file.\n\n\n### CI/CD Catalog\n\n\nThe [CI/CD\nCatalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/)\nis a list of projects with published [CI/CD\ncomponents](https://docs.gitlab.com/ee/ci/components/) you can use to extend\nyour CI/CD workflow. Anyone can create a component project and add it to the\nCI/CD Catalog or contribute to an existing project to improve the available\ncomponents. 
You can find published components in the [CI/CD\nCatalog](https://gitlab.com/explore/catalog) on GitLab.com.\n\n\n> [Tutorial: How to set up your first GitLab CI/CD\ncomponent](https://about.gitlab.com/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component/)\n\n\n### CI templates\n\n\nYou can also create your own [CI\ntemplates](https://docs.gitlab.com/ee/ci/examples/) to standardize and reuse\nCI/CD configurations across multiple projects. This promotes consistency and\nreduces duplication.\n\n\nTo create a CI template:\n\n\n1. Create a `.gitlab-ci.yml` file in a dedicated project or repository.  \n\n2. Define your CI/CD configuration in the template.  \n\n3. In your project's `.gitlab-ci.yml` file, use the `include` keyword to\ninclude the template.\n\n\n## Take your development to the next level\n\n\nGitLab CI/CD is a powerful tool that can transform your development\nworkflow. By understanding the concepts of CI/CD, configuring your\npipelines, and leveraging features like Auto DevOps, the CI/CD Catalog, and\nCI templates, you can automate your entire software development lifecycle\nand deliver high-quality software faster and more efficiently.\n\n\n> Want to take your learning to the next level? Sign up for [GitLab\nUniversity courses](https://university.gitlab.com/). 
Or you can get going\nright away with a [free trial of GitLab\nUltimate](https://about.gitlab.com/free-trial/).\n\n\n## \"Getting Started with GitLab\" series\n\n\nCheck out more articles in our \"Getting Started with GitLab\" series:\n\n\n- [How to manage\nusers](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-manage-users/)\n\n- [How to import your projects to\nGitLab](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-import-your-projects-to-gitlab/)  \n\n- [Mastering project\nmanagement](https://about.gitlab.com/blog/getting-started-with-gitlab-mastering-project-management/)\n\n- [Automating Agile workflows with the gitlab-triage\ngem](https://about.gitlab.com/blog/automating-agile-workflows-with-the-gitlab-triage-gem/)\n\n- [Working with CI/CD\nvariables](https://about.gitlab.com/blog/getting-started-with-gitlab-working-with-ci-cd-variables/)\n",[109,721,9,496,1146,766],{"slug":1190,"featured":91,"template":701},"getting-started-with-gitlab-understanding-ci-cd","content:en-us:blog:getting-started-with-gitlab-understanding-ci-cd.yml","Getting Started With Gitlab Understanding Ci Cd","en-us/blog/getting-started-with-gitlab-understanding-ci-cd.yml","en-us/blog/getting-started-with-gitlab-understanding-ci-cd",{"_path":1196,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1197,"content":1202,"config":1208,"_id":1210,"_type":14,"title":1211,"_source":16,"_file":1212,"_stem":1213,"_extension":19},"/en-us/blog/getting-started-with-gitlab-working-with-ci-cd-variables",{"title":1198,"description":1199,"ogTitle":1198,"ogDescription":1199,"noIndex":6,"ogImage":1180,"ogUrl":1200,"ogSiteName":686,"ogType":687,"canonicalUrls":1200,"schema":1201},"Getting started with GitLab: Working with CI/CD variables","Learn what CI/CD variables are, why they are important in DevSecOps, and best practices for utilizing them.","https://about.gitlab.com/blog/getting-started-with-gitlab-working-with-ci-cd-variables","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab: Working with CI/CD variables\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab Team\"}],\n        \"datePublished\": \"2025-05-27\",\n      }",{"title":1198,"description":1199,"authors":1203,"heroImage":1180,"date":1205,"body":1206,"category":1146,"tags":1207},[1204],"GitLab Team","2025-05-27","*Welcome to our \"Getting started with GitLab\" series, where we help\nnewcomers get familiar with the GitLab DevSecOps platform.*\n\n\nIn an earlier article, we explored [GitLab\nCI/CD](https://about.gitlab.com/blog/getting-started-with-gitlab-understanding-ci-cd/).\nNow, let's dive deeper into the world of **CI/CD variables** and unlock\ntheir full potential.\n\n\n### What are CI/CD variables?\n\n\nCI/CD variables are dynamic key-value pairs that you can define at different\nlevels within your GitLab environment (e.g., project, group, or instance).\nThese variables act as placeholders for values that you can use in your\n`.gitlab-ci.yml` file to customize your pipelines, securely store sensitive\ninformation, and make your CI/CD configuration more maintainable.\n\n\n### Why are CI/CD variables important?\n\n\nCI/CD variables offer numerous benefits:\n\n\n* **Flexibility** - Easily adapt your pipelines to different environments,\nconfigurations, or deployment targets without modifying your core CI/CD\nscript.  \n\n* **Security** - Securely store sensitive information like API keys,\npasswords, and tokens, preventing them from being exposed directly in your\ncode.  \n\n* **Maintainability** - Keep your CI/CD configuration clean and organized by\ncentralizing values in variables, making updates and modifications easier.  
\n\n* **Reusability** - Define variables once and reuse them across multiple\nprojects, promoting consistency and reducing duplication.\n\n\n### Scopes of CI/CD variables: Project, group, and instance\n\n\nGitLab allows you to define CI/CD variables with different scopes,\ncontrolling their visibility and accessibility:\n\n\n* **Project-level variables** - These variables are specific to a single\nproject and are ideal for storing project-specific settings, such as:\n  * Deployment URLs: Define different URLs for staging and production environments.  \n  * Database credentials: Store database connection details for testing or deployment.  \n  * Feature flags: Enable or disable features during different stages of your pipeline.  \n  * Example: You have a project called \"MyWebApp\" and want to store the production deployment URL. You create a project-level variable named `DPROD_DEPLOY_URL` with the value `https://mywebapp.com`.  \n* **Group-level variables** - These variables are shared across all projects\nwithin a GitLab group. They are useful for settings that are common to\nmultiple projects, such as:\n\n  * API keys for shared services: Store API keys for services like AWS, Google Cloud, or Docker Hub that are used by multiple projects within the group.  \n  * Global configuration settings: Define common configuration parameters that apply to all projects in the group.  \n  * Example: You have a group called \"Web Apps\" and want to store an API key for Docker Hub. You create a group-level variable named `DOCKER_HUB_API_KEY` with the corresponding API key value.  \n* **Instance-level variables** - These variables are available to all\nprojects on a GitLab instance. They are typically used for global settings\nthat apply across an entire organization such as:\n\n  * Default runner registration token: Provide a default token for registering new [runners](https://docs.gitlab.com/runner/).  
\n  * License information: Store license keys for GitLab features or third-party tools.  \n  * Global environment settings: Define environment variables that should be available to all projects.  \n  * Example: You want to set a default Docker image for all projects on your GitLab instance. You create an instance-level variable named `DEFAULT_DOCKER_IMAGE` with the value `ubuntu:latest`.\n\n### Defining CI/CD variables\n\n\nTo define a CI/CD variable:\n\n\n1. Click on the **Settings > CI/CD** buttons for  your project, group, or\ninstance.  \n\n2. Go to the **Variables** section.  \n\n3. Click **Add variable**.  \n\n4. Enter the **key** (e.g., `API_KEY`) and **value**.  \n\n5. Optionally, check the **Protect variable** box for sensitive information.\nThis ensures that the variable is only available to pipelines running on\nprotected branches or tags.  \n\n6. Optionally, check the **Mask variable** box to hide the variable's value\nfrom job logs, preventing accidental exposure.  \n\n7. Click **Save variable**.\n\n\n### Using CI/CD variables\n\n\nTo use a CI/CD variable in your `.gitlab-ci.yml` file, simply prefix the\nvariable name with `$`:\n\n\n```yaml\n\ndeploy_job:\n  script:\n    - echo \"Deploying to production...\"\n    - curl -H \"Authorization: Bearer $API_KEY\" https://api.example.com/deploy\n```\n\n\n### Predefined CI/CD variables\n\n\nGitLab provides a set of [predefined CI/CD\nvariables](https://docs.gitlab.com/ci/variables/predefined_variables/) that\nyou can use in your pipelines. These variables provide information about the\ncurrent pipeline, job, project, and more.\n\n\nSome commonly used predefined variables include:\n\n\n* `$CI_COMMIT_SHA`: The commit SHA of the current pipeline.  \n\n* `$CI_PROJECT_DIR`: The directory where the project is cloned.  \n\n* `$CI_PIPELINE_ID`: The ID of the current pipeline.  
\n\n* `$CI_ENVIRONMENT_NAME`: The name of the environment being deployed to (if\napplicable).\n\n\n### Best practices\n\n\n* Securely manage sensitive variables: Use protected and masked variables\nfor API keys, passwords, and other sensitive information.  \n\n* Avoid hardcoding values: Use variables to store configuration values,\nmaking your pipelines more flexible and maintainable.  \n\n* Organize your variables: Use descriptive names and group related variables\ntogether for better organization.  \n\n* Use the appropriate scope: Choose the correct scope (project, group, or\ninstance) for your variables based on their intended use and visibility.\n\n\n### Unlock the power of variables\n\n\nCI/CD variables are a powerful tool for customizing and securing your GitLab\npipelines. By mastering variables and understanding their different scopes,\nyou can create more flexible, maintainable, and efficient workflows.\n\n\nWe hope you found it helpful and are now well-equipped to leverage the power\nof GitLab for your development projects.\n\n\n> Get started with CI/CD variables today with a [free trial of\nGitLab Ultimate with Duo Enterprise](https://about.gitlab.com/free-trial/).\n\n\n## \"Getting Started with GitLab\" series\n\nRead more articles in our \"Getting Started with GitLab\" series:\n\n\n- [How to manage\nusers](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-manage-users/)\n\n-  [How to import your projects to\nGitLab](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-import-your-projects-to-gitlab/)  \n\n- [Mastering project\nmanagement](https://about.gitlab.com/blog/getting-started-with-gitlab-mastering-project-management/)\n\n- [Automating Agile workflows with the gitlab-triage\ngem](https://about.gitlab.com/blog/automating-agile-workflows-with-the-gitlab-triage-gem/)\n\n- 
[Understanding\nCI/CD](https://about.gitlab.com/blog/getting-started-with-gitlab-understanding-ci-cd/)\n",[1146,766,721,9,109,917],{"slug":1209,"featured":91,"template":701},"getting-started-with-gitlab-working-with-ci-cd-variables","content:en-us:blog:getting-started-with-gitlab-working-with-ci-cd-variables.yml","Getting Started With Gitlab Working With Ci Cd Variables","en-us/blog/getting-started-with-gitlab-working-with-ci-cd-variables.yml","en-us/blog/getting-started-with-gitlab-working-with-ci-cd-variables",{"_path":1215,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1216,"content":1222,"config":1227,"_id":1229,"_type":14,"title":1230,"_source":16,"_file":1231,"_stem":1232,"_extension":19},"/en-us/blog/gitlab-for-cicd-agile-gitops-cloudnative",{"title":1217,"description":1218,"ogTitle":1217,"ogDescription":1218,"noIndex":6,"ogImage":1219,"ogUrl":1220,"ogSiteName":686,"ogType":687,"canonicalUrls":1220,"schema":1221},"How to use GitLab for Agile, CI/CD, GitOps, and more","Read our example engineering stories from the past two years that show how to use GitLab for you DevOps cycle, including GitOps, CI/CD and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681825/Blog/Hero%20Images/triangle_geo.jpg","https://about.gitlab.com/blog/gitlab-for-cicd-agile-gitops-cloudnative","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab for Agile, CI/CD, GitOps, and more\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-12-17\",\n      }",{"title":1217,"description":1218,"authors":1223,"heroImage":1219,"date":894,"body":1225,"category":741,"tags":1226},[1224],"Sara Kassabian","\n\nOn this blog, our community frequently shares tips, tricks, stories, and tutorials that demonstrate how to do different things with GitLab. 
This collection features some of our most popular and enduring how-to blog posts from the past two years, covering [CICD](/topics/ci-cd/), GitOps, Machine learning and more! See how various team members, companies, and users leverage GitLab to deliver software faster and more efficiently by reading and watching some of the tutorials we've featured.\n\n## Code review with GitLab\n\nWe know that code review is essential to effective collaboration, but the logistics of it all can be challenging. [Master code review by watching the demo](/blog/demo-mastering-code-review-with-gitlab/) included with this blog post.\n\n## Cool ways to use GitLab CI/CD\n\n### The basics of CI/CD\n\nBrand new to CI/CD? Read our [beginner's guide to the vocabulary and concepts](/blog/beginner-guide-ci-cd/).\n\nHere’s the [code you’ll need to build a CI/CD pipeline](/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab/) with AutoDeploy to Kubernetes, using GitLab and Helm.\n\nNext, find the [code you'll need to build a CI pipeline with GitLab](/blog/basics-of-gitlab-ci-updated/), allowing you to run jobs sequentially, in parallel, or out of order.\n\n### Pipelines with CI/CD\n\nLearn how to [build a CI/CD pipeline in 20 minutes (or less) using GitLab’s AutoDevOps](/blog/building-a-cicd-pipeline-in-20-mins/) capabilities by following the instructions in this blog post, which is based on a popular GitLab Commit Brooklyn presentation that you can watch below.\n\nDiscover [how to trigger pipelines across multiple projects](/blog/cross-project-pipeline/) using GitLab CI/CD.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/-shvwiBwFVI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### CI/CD with Android\n\nAndroid project users are in luck because in [this post we explain how to set up GitLab continuous integration (CI) 
functions](/blog/setting-up-gitlab-ci-for-android-projects/) in Android projects.\n\nGitLab and fastlane pair up to [help users publish applications to the iOS store](/blog/ios-publishing-with-gitlab-and-fastlane/) using a GitLab CI/CD runner.\n\n### CI/CD and GKE\n\n![GitLab CI/CD and GKE integration](https://about.gitlab.com/images/blogimages/gitlab-gke-integration-cover.png){: .shadow.medium.center}\n\nWe explain [how to get started with GitLab CI/CD and Google Kubernetes Engine (GKE)](/blog/getting-started-gitlab-ci-gcp/) in this initial demo.\n\nGitLab self-managed user? ✅\nUsing Google Kubernetes engine? ✅\nGreat! The [next tutorial is all about how to use GitLab CI to install GitLab runners on GKE](/blog/gitlab-ci-on-google-kubernetes-engine/) using our integration. It shouldn’t take you more than 15 minutes.\n\n## GitLab for machine learning\n\nBut what about GitLab for machine learning? We’ve got you covered. Watch the demo from GitLab Virtual Commit to see how you can use GitLab to leverage tasks for machine learning pipelines.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/DJbQJDXmjew\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## GitLab for Agile\n\nGitLab features work for many software development methodologies, including [Agile](/solutions/agile-delivery/).\n\nStart by [mapping Agile artifacts to GitLab features](/blog/gitlab-for-agile-software-development/) and explore how iteration works using GitLab.\n\n![GitLab issue board](https://about.gitlab.com/images/blogimages/issue-board.png){: .shadow.medium.left}\n\nThe GitLab issue board allows for flexible workflows and can be organized to represent [Agile software development](/topics/agile-delivery/) states.\n{: .note.text-center}\n\nThen go more in-depth to learn [how to use GitLab for Agile portfolio planning and project 
management](/blog/gitlab-for-agile-portfolio-planning-project-management/).\n\n## Giddy for GitOps?\n\n[GitOps](/topics/gitops/) takes DevOps best practices that are used for application development such as [version control](/topics/version-control/), collaboration, compliance, and CI/CD, and applies them to infrastructure automation.\n\nGitLab is the [DevOps platform](/topics/devops/) that does it all, and it’s built using Git, making it the ideal solution for GitOps processes.\n\nFirst, we explained [how GitLab and Ansible can be used together for GitOps](/blog/using-ansible-and-gitlab-as-infrastructure-for-code/) processes and [infrastructure as code](/topics/gitops/infrastructure-as-code/). In a follow-up post, we explain how [GitLab can also be paired with Terraform for GitOps](/topics/gitops/gitlab-enables-infrastructure-as-code/) and IaC.\n\nThe video on how to use Ansible and GitLab together has been viewed more than 13,000 times since it was first created in 2019, and is embedded for you below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/M-SgRTKSeOg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Visibility\n\nOne of our principles at GitLab is to [dogfood everything](/handbook/engineering/development/principles#dogfooding), so you can rest assured that we aren’t about to introduce an engineering feature without first trying it out for ourselves. When it comes to our Insights tool though, the process happened in reverse. Our Engineering Productivity team at GitLab needed a particular tool, and as we built it, we realized it would benefit our GitLab Ultimate customers. 
Read on to [learn how our Insights tool came to be](/blog/insights/).\n\nDig into this [valuable explanation of how we discovered that Prometheus query language can be used to detect anomalies](/blog/anomaly-detection-using-prometheus/) in the time-series data that GitLab.com reports.\n\n## In the clouds\n\nWatch the demo to learn how GitLab runner and RedHat OpenShift can work together to jump start your application development and deployment to the cloud.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/yGWiQwrWimk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nAnd finally, although Docker Hub may be enforcing new rate limits, there's no need to panic. We [explain how to build a monitoring plug-in](/blog/docker-hub-rate-limit-monitoring/) to help you monitor the number of pull requests.\n\nCan you think of some other stand-out blog posts or demos that we should include here? 
Drop the link in a comment below.\n\nCover image by [Chris Robert](https://unsplash.com/@chris_robert) on [Unsplash](https://unsplash.com/photos/kY-uPDLXxHg)\n{: .note}\n",[721,9,853,877],{"slug":1228,"featured":6,"template":701},"gitlab-for-cicd-agile-gitops-cloudnative","content:en-us:blog:gitlab-for-cicd-agile-gitops-cloudnative.yml","Gitlab For Cicd Agile Gitops Cloudnative","en-us/blog/gitlab-for-cicd-agile-gitops-cloudnative.yml","en-us/blog/gitlab-for-cicd-agile-gitops-cloudnative",{"_path":1234,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1235,"content":1241,"config":1247,"_id":1249,"_type":14,"title":1250,"_source":16,"_file":1251,"_stem":1252,"_extension":19},"/en-us/blog/gitlab-is-the-single-source-of-truth-for-ecommerce-provider",{"title":1236,"description":1237,"ogTitle":1236,"ogDescription":1237,"noIndex":6,"ogImage":1238,"ogUrl":1239,"ogSiteName":686,"ogType":687,"canonicalUrls":1239,"schema":1240},"GitLab is the single source of truth for eCommerce provider","Swell uses GitLab company-wide and says the biggest advantage so far is the review operations capability.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668755/Blog/Hero%20Images/swelllogo3.png","https://about.gitlab.com/blog/gitlab-is-the-single-source-of-truth-for-ecommerce-provider","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is the single source of truth for eCommerce provider\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-06-23\",\n      }",{"title":1236,"description":1237,"authors":1242,"heroImage":1238,"date":1243,"body":1244,"category":1103,"tags":1245},[1185],"2022-06-23","eCommerce platform provider [Swell](https://www.swell.is) was built to give entrepreneurs the opportunity to build the online business that they envision. 
A GitLab customer since 2021, GitLab has been adopted as Swell's one DevOps, project management, and support ticketing tool for the whole organization. It's the foundational platform that the business works on.\n\nSwell is using GitLab Premium in many different areas, including for product development and to build the platform infrastructure, says Nico Bistolfi, vice president of technology.\n\n\"GitLab is our source of truth for everything,\" Bistolfi says. Now, Swell is looking into expanding its usage of the platform to leverage features such as code quality, automation, and other types of dynamic application security and static application security.\n\n## GitLab for CI/CD\nSwell upgraded to the Premium version and the biggest advantage so far πpath-to-decomposing-gitlab-database-part2has been the review operations capability, Bistolfi says. The company has created environments for every merge request users make, and that replicates in production for testers to see what was changed, whether a fix was made, or how the new feature is working.\n\n\"We could not go to our software development lifecycle today without the review ops. That's something that is critical for us,\" Bistolfi says.\n\nGitLab is used for both continuous integration (CI) and continuous deployment (CD). While building the [CI/CD](/topics/ci-cd/) pipeline process is ongoing, Bistolfi says, “We are slowly changing it and relying more and more on GitLab” in areas, including application security.\n\nBefore moving to GitLab, Swell was using bare-metal servers. The company now uses GitLab’s container management solutions and all API updates are happening through the platform.\n\n## From inputting issues to resolution\nEveryone at Swell is using GitLab — not just developers — and for a variety of tasks. The company has created a way to process support tickets through the platform. Another use case is knowledge management.\n\n\"We find ourselves making some decisions from comments in GitLab,\" he says. 
The whole process from the time a ticket is created to being resolved is done within the platform.\n\nThe company culture is about full information transparency, Bistolfi says, particularly since Swell is fully remote and employees work from 11 different countries. So one goal is to maintain asynchronous communication.\n\nWhen an issue is created in the platform, a little bit of coding is required, but he said non-developer users have adapted well. The feedback so far has been that using GitLab has been frictionless.\n\n## Speed to delivery\nInitially, for some services, it took about 30 minutes to build and deploy an image. Now, the process has been decreased to between one and five minutes in most cases.\n\nSwell manually sets release dates for system improvements and, right now, there are about two a week. The company is working on automating the process for continuous delivery with the goal of soon having releases every couple of hours.\n\n## Team play\nSwell manages team backlogs, sprints, milestones, and future work using its own flavor of Kanban with what Bistolfi calls \"quick labels.\"\n\nEngineering teams are being scaled and, in addition to Kanban, some projects are done using Scrum. Changing their GitLab configuration has let teams measure velocity better.  \n\nA future goal is to gain visibility into team results, as well as use GitLab for project planning and management, he says.\n\n## GitLab as a product and company\nBistolfi is unequivocal in his enthusiasm for GitLab. \"We know that GitLab is there for us to continue growing,\" he says. \"We know we can rely on that. And something that I always tell a team when we are evaluating what we're going to do or how we're going to solve certain problems is that there are areas GitLab is just starting to innovate on or is just starting to launch new features.\"\n\nIf those areas are at 80% of what Swell needs, the company will continue to use GitLab. 
\"We need to have very, very strong reasons to look for another tool to integrate with GitLab.\" He added that \"we trust that GitLab is going in the right direction for us. In addition, we've gained efficiency in our ability to provide consistent test environments using Gitlab Review Apps to reduce regressions and improve new feature development.\"\n\nThe Swell team also likes that GitLab provides thorough and complete information in its handbook, which has been very beneficial in helping the company manage things internally. \"That has been inspiring for many of us on the executive team,\" he notes.\n\nFor example, during the pandemic, Bistolfi put together a document called \"The Ultimate Guide for Swell Engineers,\" which contains three pages of information about culture, what to expect from teammates, and how to communicate and prioritize tasks.\n\nA lot of guidance came from the GitLab handbook, he adds.\n\nMoving forward with GitLab, Bistolfi says: \"We are incorporating most of the Security and Compliance tools in order to keep track and audit for our compliance. We plan to expand the usage to other projects, but we are already using container and dependency scanning, SAST, secrets detection, and license scanning for some of our core and more sensitive services.\"\n\nWhat Swell likes most about GitLab is the thoroughness of the tool. 
\"From an engineering perspective, 10 years ago, you would never have imagined all the features and capabilities that GitLab offers being incorporated into one platform,\" Bistolfi says.",[765,721,9,787,1246],"customers",{"slug":1248,"featured":6,"template":701},"gitlab-is-the-single-source-of-truth-for-ecommerce-provider","content:en-us:blog:gitlab-is-the-single-source-of-truth-for-ecommerce-provider.yml","Gitlab Is The Single Source Of Truth For Ecommerce Provider","en-us/blog/gitlab-is-the-single-source-of-truth-for-ecommerce-provider.yml","en-us/blog/gitlab-is-the-single-source-of-truth-for-ecommerce-provider",{"_path":1254,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1255,"content":1261,"config":1267,"_id":1269,"_type":14,"title":1270,"_source":16,"_file":1271,"_stem":1272,"_extension":19},"/en-us/blog/gitlab-jira-integration-selfmanaged",{"title":1256,"description":1257,"ogTitle":1256,"ogDescription":1257,"noIndex":6,"ogImage":1258,"ogUrl":1259,"ogSiteName":686,"ogType":687,"canonicalUrls":1259,"schema":1260},"How to achieve a GitLab Jira integration","Check out how to integrate GitLab self-managed with Atlassian Jira to connect your merge requests, branches, and commits to a Jira issue.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667260/Blog/Hero%20Images/twopeasinapod.jpg","https://about.gitlab.com/blog/gitlab-jira-integration-selfmanaged","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to achieve a GitLab Jira integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tye Davis\"}],\n        \"datePublished\": \"2021-04-12\",\n      }",{"title":1256,"description":1257,"authors":1262,"heroImage":1258,"date":1264,"body":1265,"category":741,"tags":1266},[1263],"Tye Davis","2021-04-12","\n_This is the second in a series of posts on GitLab Jira integration strategies. 
The [first post](/blog/integrating-gitlab-com-with-atlassian-jira-cloud/) explains how to integrate GitLab.com with Jira Cloud._\n\nThe advantages of a GitLab Jira integration are clear:\n\n* One GitLab project integrates with all the Jira projects in a single Jira instance. \n* Quickly navigate to Jira issues from GitLab. \n* Detect and link to Jira issues from GitLab commits and merge requests. \n* Log GitLab events in the associated Jira issue. \n* Automatically close (transition) Jira issues with GitLab commits and merge requests.\n\nHere's a step-by-step guide of everything you need to know to achieve a GitLab Jira integration.\n\n## Pre-configuration\n\nAs you approach configuring your GitLab project to Jira, you can choose from two options that best fit your company or organization's needs.  You can either:\n\n* Use a service template by having a GitLab administrator provide default values for configuring integrations at the project level. When enabled, the defaults are applied to all projects that do not already have the integration enabled or do not otherwise have custom values enabled. The Jira integration values are all pre-filled on each project's configuration page for jira integration. If you disable the template, these values no longer appear as defaults, while any values already saved for an integration remain unchanged.\n\n* Configure integrations at a specific project level that will contain custom values specific to that project and that project alone.\n\nIt should be noted that each GitLab project can be configured to connect to an entire Jira instance. That means one GitLab project can interact with all Jira projects in that instance, once configured. Therefore, you will not have to explicitly associate a GitLab project with any single Jira project.\n\nGitLab offers several different options that allow you to integrate Jira in a way that best fits you and your team's needs based on how you’ve set up your Jira software. 
Let’s take a deeper look into how to set up each of these available options.
Navigate to the Admin Area > Service Templates and choose the Jira service template.\n\n![GitLab Service Templates](https://about.gitlab.com/images/blogimages/atlassianjira/GitLabServiceTemplates.png){: .shadow.medium.center}\nGitLab Service Templates\n{: .note.text-center}\n\n2a. For each project, you will still need to configure the issue tracking URLs by replacing :issues_tracker_id in the above screenshot with the ID used by your external issue tracker.\n\n![Issue Tracker ID](https://about.gitlab.com/images/blogimages/atlassianjira/issuetrackerid.png){: .shadow.medium.center}\nIssue Tracker ID\n{: .note.text-center}\n\nTo set up a individual project template:\n\n* 1b. To enable the Jira integration in a project, navigate to the Integrations page and click the Jira service.\n\n![Enable Jira Integration](https://about.gitlab.com/images/blogimages/atlassianjira/enablejiraintegration.png){: .shadow.medium.center}\nEnable Jira Integration\n{: .note.text-center}\n\n* 2b. Select a Trigger action. This determines whether a mention of a Jira issue in GitLab commits, merge requests, or both, should link the Jira issue back to that source commit/MR and transition the Jira issue, if indicated.\n\n![Select Trigger Action](https://about.gitlab.com/images/blogimages/atlassianjira/selecttriggeraction.png){: .shadow.medium.center}\nSelect Trigger Action\n{: .note.text-center}\n\n* 3b. To include a comment on the Jira issue when the above reference is made in GitLab, check Enable comments.\n\n* 3c.  
Enter the further details on the page as described in the following table:\n\n| Field | Description |\n|-------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| Web URL | The base URL to the Jira instance web interface which is being linked to this GitLab project. E.g.,  https://jira.example.com. |\n| Jira API URL | The base URL to the Jira instance API. Web URL value will be used if not set. E.g.,  https://jira-api.example.com. Leave this field blank (or use the same value of Web URL) if using Jira Cloud.|\n| Username or Email | Use username for Jira Server or email for Jira Cloud |\n| Transition ID | Required for closing Jira issues via commits or merge requests. This is the ID of a transition in Jira that moves issues to a desired state. If you insert multiple transition IDs separated by , or;, the issue is moved to each state, one after another, using the given order. (See below for obtaining a transition ID) |\n\nIn order to obtain a transition ID, do the following:\n* By using the API, with a request like https://yourcompany.atlassian.net/rest/api/2/issue/ISSUE-123/transitions using an issue that is in the appropriate “open” state\n\n*Note: The transition ID may vary between workflows (e.g., bug vs. 
story), even if the status you are changing to is the same.*\n\n![Transition ID](https://about.gitlab.com/images/blogimages/atlassianjira/transitionid.png){: .shadow.medium.center}\nTransition ID\n{: .note.text-center}\n\nYour GitLab project can now interact with all Jira projects in your instance and the project now displays a Jira link that opens the Jira project.\n\nWhen you have configured all settings, click **Test settings and save changes.** \n\n![Test settings and save changes](https://about.gitlab.com/images/blogimages/atlassianjira/testsettingsandsavechanges.png){: .shadow.medium.center}\nTest settings and save changes\n{: .note.text-center}\n\nIt should be noted that you can only display issues from a single Jira project within a given GitLab project.\n\nThe integration is now **activated:**\n\n![Active Jira Integration](https://about.gitlab.com/images/blogimages/atlassianjira/activeintegration.png){: .shadow.medium.center}\nActive Jira Integration\n{: .note.text-center}\n\n## Jira Issues\n\nBy now you should have [configured Jira](https://docs.gitlab.com/ee/integration/jira/index.html#configuring-jira) and enabled the [Jira service in GitLab](https://docs.gitlab.com/ee/integration/jira/index.html#configuring-gitlab). If everything is set up correctly you should be able to reference and close Jira issues by just mentioning their ID in GitLab commits and merge requests.\n\nJira issue IDs must be formatted in uppercase for the integration to work.\n\n### 1.How to reference Jira issues\n\nWhen GitLab project has Jira issue tracker configured and enabled, mentioning Jira issue in GitLab will automatically add a comment in Jira issue with the link back to GitLab. This means that in comments in merge requests and commits referencing an issue, e.g., PROJECT-7, will add a comment in Jira issue in the format:\n\nUSER mentioned this issue in RESOURCE_NAME of [PROJECT_NAME|LINK_TO_COMMENT]:\nENTITY_TITLE\n\n* USER A user that mentioned the issue. 
This is the link to the user profile in GitLab.\n* LINK_TO_THE_COMMENT Link to the origin of mention with a name of the entity where Jira issue was mentioned.\n* RESOURCE_NAME Kind of resource which referenced the issue. Can be a commit or merge request.\n* PROJECT_NAME GitLab project name.\n* ENTITY_TITLE Merge request title or commit message first line.\n\n![Reference Jira issues](https://about.gitlab.com/images/blogimages/atlassianjira/issuelinks.png){: .shadow.medium.center}\nReference Jira issues\n{: .note.text-center}\n\nFor example, the following commit will reference the Jira issue with PROJECT-1 as its ID:\n\ngit commit -m \"PROJECT-1 Fix spelling and grammar\"\n\nClosing Jira Issues\n\nJira issues can be closed directly from GitLab when you push code by using trigger words in commits and merge requests. When a commit which contains the trigger word followed by the Jira issue ID in the commit message is pushed, GitLab will add a comment in the mentioned Jira issue and immediately close it (provided the transition ID was set up correctly).\n\nThere are currently three trigger words, and you can use either one to achieve the same goal:\n* Resolves PROJECT-1\n* Closes PROJECT-1\n* Fixes PROJECT-1\n\nwhere PROJECT-1 is the ID of the Jira issue.\n\nNotes:\n\n* Only commits and merges into the project’s default branch (usually main or master) will close an issue in Jira. 
You can change your project’s default branch under project settings.
GitLab account configuration for DVCS\n\n* In GitLab, create a new application to allow Jira to connect with your GitLab account.\nWhile signed in to the GitLab account that you want Jira to use to connect to GitLab, click your profile avatar at the top right, and then click Settings > Applications. Use the form to create a new application.\n\n* In the Name field, enter a descriptive name for the integration, such as Jira.\nFor the Redirect URI field, enter https://\u003Cgitlab.example.com>/login/oauth/callback, replacing \u003Cgitlab.example.com> with your GitLab instance domain. For example, if you are using GitLab.com, this would be https://gitlab.com/login/oauth/callback.\nNote: If using a GitLab version earlier than 11.3, the Redirect URI must be https://\u003Cgitlab.example.com>/-/jira/login/oauth/callback. If you want Jira to have access to all projects, GitLab recommends that an administrator create the application.\n\n![Admin Creates Integration](https://about.gitlab.com/images/blogimages/atlassianjira/admincreates.png){: .shadow.medium.center}\nAdmin Creates Integration\n{: .note.text-center}\n\n* Check API in the Scopes section and uncheck any other checkboxes.\n\n* Click Save application. GitLab displays the generated Application ID and Secret values. 
Copy these values, which you will use in Jira.\n\n*Tip: To ensure that regular user account maintenance doesn’t impact your integration, create and use a single-purpose jira user in GitLab.*\n\n## Jira DVCS Connector setup\n\nNote: If you’re using GitLab.com and Jira Cloud, we recommend you use the [GitLab for Jira app](https://docs.gitlab.com/ee/integration/jira/index.html), unless you have a specific need for the DVCS Connector.\n\n* Ensure you have completed the [GitLab configuration](https://docs.gitlab.com/ee/integration/jira/index.html).\n\n![Check api in Applications](https://about.gitlab.com/images/blogimages/atlassianjira/checkapi.png){: .shadow.medium.center}\nCheck api in Applications\n{: .note.text-center}\n\n![Application was created successfully](https://about.gitlab.com/images/blogimages/atlassianjira/applicationsuccessful.png){: .shadow.medium.center}\nApplication was created successfully\n{: .note.text-center}\n\n* If you’re using Jira Server, go to Settings (gear) > Applications > DVCS accounts. If you’re using Jira Cloud, go to Settings (gear) > Products > DVCS accounts.\n\n![Go to DVCS in Settings](https://about.gitlab.com/images/blogimages/atlassianjira/dvcssettings.png){: .shadow.medium.center}\nGo to DVCS in Settings\n{: .note.text-center}\n\n* Click Link GitHub Enterprise account to start creating a new integration. 
(We’re pretending to be GitHub in this integration, until there’s additional platform support in Jira.)\n\n![Click Link to start new integration](https://about.gitlab.com/images/blogimages/atlassianjira/dvcsaccount.png){: .shadow.medium.center}\nClick Link to start new integration\n{: .note.text-center}\n\n* Complete the form:\nSelect GitHub Enterprise for the Host field.\nIn the Team or User Account field, enter the relative path of a top-level GitLab group that you have access to, or the relative path of your personal namespace.\n\n![Add new account](https://about.gitlab.com/images/blogimages/atlassianjira/addnewaccount.png){: .shadow.medium.center}\nAdd new account\n{: .note.text-center}\n\nIn the Host URL field, enter https://\u003Cgitlab.example.com>/, replacing \u003Cgitlab.example.com> with your GitLab instance domain. For example, if you are using GitLab.com, this would be https://gitlab.com/.\n\n*Note: If using a GitLab version earlier than 11.3 the Host URL value should be https://\u003Cgitlab.example.com>/-/jira*\n\nFor the Client ID field, use the Application ID value from the previous section.\n\nFor the Client Secret field, use the Secret value from the previous section.\n\nEnsure that the rest of the checkboxes are checked.\n\n* Click Add to complete and create the integration.\nJira takes up to a few minutes to know about (import behind the scenes) all the commits and branches for all the projects in the GitLab group you specified in the previous step. These are refreshed every 60 minutes.\n\nIn the future, we plan on implementing real-time integration. 
If you need to refresh the data manually, you can do this from the Applications -> DVCS accounts screen where you initially set up the integration:\n\n![Refresh data manually](https://about.gitlab.com/images/blogimages/atlassianjira/refreshdata.png){: .shadow.medium.center}\nRefresh data manually\n{: .note.text-center}\n\nTo connect additional GitLab projects from other GitLab top-level groups (or personal namespaces), repeat the previous steps with additional Jira DVCS accounts.\n\nFor troubleshooting your DVCS connection, go to [GitLab Docs](https://docs.gitlab.com/ee/integration/jira/index.html) for more information.\n\n_In our next blog post we'll look at [Usage](https://docs.gitlab.com/ee/integration/jira_development_panel.html#usage)._\n\nCover image by [Mikołaj Idziak](https://unsplash.com/@mikidz) on [Unsplash](https://unsplash.com/photos/nwjRmbXbLgw).\n{: .note.text-left}\n",[1001,765,9],{"slug":1268,"featured":6,"template":701},"gitlab-jira-integration-selfmanaged","content:en-us:blog:gitlab-jira-integration-selfmanaged.yml","Gitlab Jira Integration Selfmanaged","en-us/blog/gitlab-jira-integration-selfmanaged.yml","en-us/blog/gitlab-jira-integration-selfmanaged",{"_path":1274,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1275,"content":1281,"config":1288,"_id":1290,"_type":14,"title":1291,"_source":16,"_file":1292,"_stem":1293,"_extension":19},"/en-us/blog/gitlab-joins-cd-foundation",{"title":1276,"description":1277,"ogTitle":1276,"ogDescription":1277,"noIndex":6,"ogImage":1278,"ogUrl":1279,"ogSiteName":686,"ogType":687,"canonicalUrls":1279,"schema":1280},"GitLab leads the industry forward with the CD Foundation","Today we're proud to announce we've joined the CD Foundation as a founding member.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663648/Blog/Hero%20Images/gitlab-joins-cd-foundation.jpg","https://about.gitlab.com/blog/gitlab-joins-cd-foundation","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab leads the industry forward with the CD Foundation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2019-03-12\",\n      }",{"title":1276,"description":1277,"authors":1282,"heroImage":1278,"date":1284,"body":1285,"category":301,"tags":1286},[1283],"Sid Sijbrandij","2019-03-12","\n\nToday GitLab joined the [CD Foundation](https://cd.foundation/announcement/2019/03/12/the-linux-foundation-announces-new-foundation-to-support-continuous-delivery-collaboration/) as a founding member, to help foster collaboration and educate the industry on how to enable any software development team around the world to implement CI/CD best practices.\n\nAs one of the first to introduce cloud native CI/CD to the industry, we are excited to see so many companies come together to discuss ways to take the industry forward to ensure that code is able to get to production not only quickly, but securely. 
We are looking forward to lending our experience working with millions of developers and thousands of enterprises to drive forward the conversation on best practices and standards to streamline the code delivery promise.\n\n## But there is more to software delivery than CI/CD\n\nWhile it is great there is an eye on the best CI/CD practices, we believe there is more to delivering great software to market than just CI/CD, which is why at GitLab we are focused on providing a single application for the entire DevOps lifecycle.\n\nIt is not only about source code management or CI/CD but also about:\n- [Value stream management](/solutions/value-stream-management/): Understanding your teams' work and their workflow so they can deliver value to customers faster.\n- Operational excellence: Implementing dynamic infrastructure and robust observability to increase uptime and decrease mean time to resolution.\n- Security flow: Building security into every step of your code delivery process, to deliver secure software without slowing the pace of innovation.\n- Monitoring: Automatically monitor metrics so you know how any change in code impacts your production environment.\n\nOur entire [2019 product vision and beyond](/blog/gitlab-product-vision/) is about continuing to build out new capabilities across the entire DevOps lifecycle, to make it easier for enterprises to streamline their processes into one application, helping teams innovate at faster speeds.\n\nAs an open source company, we value the community’s contributions, in helping make GitLab what it is today. 
We look forward to continuing to drive the industry forward in CI/CD, as well as working with you to help deliver your products to market quickly and securely.\n\nPhoto by [YIFEI CHEN](https://unsplash.com/photos/FPMRxKd7MxI?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/spiral-lights?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[1001,9,1287],"open source",{"slug":1289,"featured":6,"template":701},"gitlab-joins-cd-foundation","content:en-us:blog:gitlab-joins-cd-foundation.yml","Gitlab Joins Cd Foundation","en-us/blog/gitlab-joins-cd-foundation.yml","en-us/blog/gitlab-joins-cd-foundation",{"_path":1295,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1296,"content":1302,"config":1307,"_id":1309,"_type":14,"title":1310,"_source":16,"_file":1311,"_stem":1312,"_extension":19},"/en-us/blog/gitlab-oracle-cloud-arm-based",{"title":1297,"description":1298,"ogTitle":1297,"ogDescription":1298,"noIndex":6,"ogImage":1299,"ogUrl":1300,"ogSiteName":686,"ogType":687,"canonicalUrls":1300,"schema":1301},"How to use GitLab with OCI ARM-based compute instances","We explain two ways to set up GitLab on Oracle ARM-based instances.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679507/Blog/Hero%20Images/ci-cd.png","https://about.gitlab.com/blog/gitlab-oracle-cloud-arm-based","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab with OCI ARM-based compute instances\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2021-05-25\",\n      }",{"title":1297,"description":1298,"authors":1303,"heroImage":1299,"date":1304,"body":1305,"category":808,"tags":1306},[1019],"2021-05-25","\n\n[ARM-based processors](https://en.wikipedia.org/wiki/ARM_architecture) have gained popularity due to their energy-saving 
capabilities and performance as shown in the recent adoptions by Apple. Previously a mainstay for mobile, edge, or small devices, ARM-based chips are now used for almost all types of systems, including servers. \n\nThis surge in the use of ARM-based systems means development toolchains have to support building for the ARM architecture reliably and efficiently. It is here where the GitLab Runner shines, allowing users to run CI/CD jobs on ARM servers. Coupling the GitLab Runner with the Oracle Cloud Infrastructure (OCI) offerings of ARM-based compute instances lets development teams have best in class CI/CD infrastructure to target both ARM and x86 architecture.\n \nThe recommended method of installing GitLab is using the automated deployment options for OCI by clicking the \"[Deploy to Oracle Cloud](https://console.us-phoenix-1.oraclecloud.com/resourcemanager/stacks/create?region=home&zipUrl=https://gitlab.com/gitlab-com/alliances/oracle/sandbox-projects/gitlab-terraform-oci/-/jobs/artifacts/main/raw/oci-gitlab-orm.zip?job=package_repo)\" button, which takes advantage of full-tested scripts for single click deployment through the OCI console.s.\n\nIf you will be deploying manually on virtual machines on OCI, there are certain caveats users need to be aware of when setting up GitLab Runner and GitLab on an OCI ARM-based instance.\n\n## How to set up GitLab CI/CD on ARM instances\n\nThe core feature of [GitLab CI/CD](/topics/ci-cd/) is the runner – it executes all the instructions to accomplish the jobs in the CI/CD pipelines. One of its strengths is the support for the diverse architecture and operating systems, including Oracle Linux server distribution running on ARM-based systems. This functionality allows users to maintain diverse runners targeting different architectures for the various workloads of development teams. 
\n\nInstalling the GitLab Runner on ARM-based instances is straightforward: After adding the official GitLab package repository, install the runner. However, if you are running Oracle Linux Server release 8.x (ol/8), you will need to manually set up the package repository, because  ol/8 by PackageCloud, which GitLab uses to host packages, is not supported. \n\nTo set up the repository manually use the following commands:\n\n```\ncurl https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.rpm.sh > script.sh\nchmod +x script.sh\nos=el dist=8 ./script.sh\n```\n\n## How to set up GitLab EE/Core on ARM instances\n\nSimilar to the Runner, you will need to manually set up GitLab's package repository if you are running ol/8. The commands are similar, aside from the package URL as shown below: \n\n```\ncurl https://packages.gitlab.com/install/repositories/gitlab/gitlab-ee/script.rpm.sh > script.sh\nchmod +x script.sh\nos=el dist=8 ./script.sh\nEXTERNAL_URL=\"[GITLAB-INSTANCE-URL]\" dnf install -y gitlab-ee\n```\n\nOne caveat to deploying to recent versions of GitLab using Omnibus is the [ARM64 cow bug affecting Redis](https://github.com/redis/redis/pull/8405), which is bundled with GitLab Omnibus installations. This bug only affects GitLab versions from 13.9 – which was the version in which the bundled Redis was upgraded to 6.0.10. You can install pre-13.9 versions of GitLab manually. For example, to install version 13.8use the command: `yum install gitlab-ee-13.8.6-ee.0.el8.aarch64`.\n\nThe fix for the bug is pulled into the [6.0 branch of the Redis upstream project](https://github.com/redis/redis/commit/dcf409f8e72dcd6bbf2f31d2ecc8f6f797c303c2) and will make its way to future GitLab releases. The bug only affects Redis on ARM64 architecture (aarch64) and is not specific to GitLab or the Oracle Linux server. 
You can [disable the bundled Redis instance and configure](https://docs.gitlab.com/omnibus/settings/redis.html) a separate local Redis instance or an external service.\n## Watch and learn\nWatch the video to see how to set up a Runner on an OCI ARM64 instance running the Oracle Linux server.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/Q2o0JYdQAWE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[808,721,9],{"slug":1308,"featured":6,"template":701},"gitlab-oracle-cloud-arm-based","content:en-us:blog:gitlab-oracle-cloud-arm-based.yml","Gitlab Oracle Cloud Arm Based","en-us/blog/gitlab-oracle-cloud-arm-based.yml","en-us/blog/gitlab-oracle-cloud-arm-based",{"_path":1314,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1315,"content":1321,"config":1328,"_id":1330,"_type":14,"title":1331,"_source":16,"_file":1332,"_stem":1333,"_extension":19},"/en-us/blog/gitops-done-3-ways",{"title":1316,"description":1317,"ogTitle":1316,"ogDescription":1317,"noIndex":6,"ogImage":1318,"ogUrl":1319,"ogSiteName":686,"ogType":687,"canonicalUrls":1319,"schema":1320},"3 Ways to approach GitOps","Learn about how GitLab users can employ GitOps to cover both Kubernetes and non-Kubernetes environments","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669635/Blog/Hero%20Images/gitops-cover.jpg","https://about.gitlab.com/blog/gitops-done-3-ways","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 Ways to approach GitOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Saumya Upadhyaya\"},{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2021-04-27\",\n      }",{"title":1316,"description":1317,"authors":1322,"heroImage":1318,"date":1325,"body":1326,"category":741,"tags":1327},[1323,1324],"Saumya Upadhyaya","Dov 
Hershkovitch","2021-04-27","\n\nThe term [\"GitOps\"](/topics/gitops/) first emerged in the Kubernetes community as a way for organizations to enable Ops teams move at the pace of application development. With improved automation and less risk, GitOps is quickly becoming the workflow of choice for infrastructure automation.\n\nAt GitLab, the approach to GitOps goes beyond Kubernetes. Before the buzz around GitOps picked up in the DevOps community, GitLab users and customers were applying GitOps principles to all types of infrastructure, including physical servers, virtual machines, containers, and Kubernetes clusters ([multicloud](/topics/multicloud/) and on-premise).\n\n## What is GitOps?\n\nThere are two main [approaches to GitOps](https://www.gitops.tech/), a push-based approach and a pull-based approach.\n\n- *Push-based approach*: A CI/CD tool pushes the changes to the environment. Applying GitOps via push is consistent with the approach used for application deployment. In this case, deployment targets for a push-based approach are not limited to Kubernetes.\n![push based deployment](https://about.gitlab.com/images/blogimages/gitops-push.png){: .shadow.medium.center}\nHow the push-based approach works for GitOps.\n{: .note.text-center}\n\n- *Pull-based approach*: An agent installed in a cluster pulls changes whenever there is a deviation from the desired configuration. In the pull-based approach, deployment targets are limited to Kubernetes and an agent must be installed in each Kubernetes cluster.\n![pull based deployment](https://about.gitlab.com/images/blogimages/gitops-pull.png){: .shadow.medium.center}\nHow the pull-based approach works for GitOps.\n{: .note.text-center}\n\n## How to employ GitOps principles using GitLab\n\nGitLab supports both of the approaches mentioned above, which can be used with and without a Kubernetes agent. 
Along with the [recently introduced Kubernetes agent](/blog/gitlab-kubernetes-agent-on-gitlab-com/), GitLab supports GitOps principles by supporting a three types of deployment targets and environments: The single application for infrastructure code; configurations using CI/CD for automation; and merge requests for collaboration and controls.\n\nBelow we unpack three methods for applying GitOps principles using GitLab technology.\n\n### Push using manually configured CI/CD release targets\n\nThe infrastructure configurations are stored in git. The user sets up the [supported deployment targets](/install/) and uses the standard CI/CD workflow to push infrastructure changes. To ensure the desired state in the repository is consistent with the environment, CI/CD will need to run on a regular schedule to identify drift and reconcile as required. Manual intervention may be required at times to cater to failed pipelines. Many GitLab users have been using this approach to push infrastructure changes to their test, staging, and production environments.\n\nThe manual push approach is ideal for both Kubernetes and supported non-Kubernetes environments, such as embedded systems, on-premise servers, mainframes, virtual machines, or FaaS offerings.\n\n### Push using Terraform\n\nIn this approach, an out-of-the box [integration with Terraform](https://docs.gitlab.com/ee/user/infrastructure/) helps Terraform users seamlessly implement GitOps workflows using GitLab. Terraform manifests are stored in the Git repository where users can collaborate on changes within the merge requests. The Terraform plan reports can be displayed within the merge requests and the Terraform state can be stored using the GitLab-managed Terraform state backend. 
Everything is integrated into GitLab, which spares users from performing these tasks via third-party tools or integrations.\n\nThe push approach is ideal for both Kubernetes and non-Kubernetes deployment targets that are supported by Terraform.\n\n### Pull using a Kubernetes agent\n\nIn fall 2020, GitLab [introduced a Kubernetes agent](/blog/gitlab-kubernetes-agent-on-gitlab-com/) that initiates a secure web-socket connection from a Kubernetes cluster to a GitLab instance. There is a GitLab server component that polls for any repository changes on the server and informs the agent when there is a deviation between the desired state and the cluster environment. This process helps minimize the load on the cluster and network. Whenever a drift is detected the agent pulls the latest configurations from the git repository and updates the environment accordingly. This GitOps approach requires the Kubernetes agent to be installed on every Kubernetes cluster, which can be done with ease as the GitLab Agent for Kubernetes uses GitOps principles to install and update the agent as required. This GitOps method is ideal for Kubernetes environments only.\n\n![kubernetes agent](https://about.gitlab.com/images/blogimages/gitops-agent.png){: .shadow.medium.center}\nInside the pull-based approach using a Kubernetes agent.\n{: .note.text-center}\n\n### Up next: Push using a Kubernetes agent\n\nGitLab also aims to support GitOps is by using a push approach with a Kubernetes agent. The push based approach using manually configured Kubernetes target attaches a Kubernetes cluster to GitLab through a certificate exchange. This approach leverages the CI/CD workflow for infrastructure automation and is fairly straightforward, but it also introduces risk by opening up a firewall and using cluster admin rights for cluster integration. 
To overcome these challenges while leveraging the CI/CD workflow - the [push-based approach using the Kubernetes agent](https://gitlab.com/groups/gitlab-org/-/epics/5528) aims to reuse the web-socket interface to establish a secure connection between GitLab and the Kubernetes cluster and allows GitLab CI/CD to securely push changes using this interface. When available, this approach would also provide a migration path for users who are currently setting up the Kubernetes integration using a certificate exchange.\n\nThe third approach is ideal for Kubernetes environments only. When available, it can be used in conjuction with the pull-based approach to optimize the GitOps workflow.\n\n## Accelerate the SDLC with GitOps principles\n\nWhether you are using physical, virtual, containers, Kubernetes - on-prem or cloud-based infrastructures – GitLab uses GitOps principles a variety of ways to meet your team wherever it's at. GitLab supports many different options because we understand the typical organization has a mixed IT landscape, with various heterogeneous technologies in a number of different environments.\n\n***What’s your preferred approach to GitOps?*** Drop us a comment.\n\n## Learn more about GitOps at GitLab\n\nRead on to explore how GitLab works with different technologies to deliver a GitOps solution for every company at every stage.\n\n* ***Blog***: [A new era of Kubernetes integrations on GitLab.com](/blog/gitlab-kubernetes-agent-on-gitlab-com/)\n* ***Webcast***: [GitLab and HashiCorp - A holistic guide to GitOps and the Cloud Operating Model](/webcast/gitlab-hashicorp-gitops/)\n* ***Testimonial***: [Shaping a financial service’s cloud strategy using GitLab and Terraform](https://www.youtube.com/watch?v=2LF3eOoGV_o&list=PLFGfElNsQthb4FD4y1UyEzi2ktSeIzLxj&index=6)\n\nCover image by [Rodolfo Cuadros](https://unsplash.com/@rocua18) on [Unsplash](https://unsplash.com/photos/JKzgp6vhJ8M)\n{: 
.note}\n",[550,9,959,1001],{"slug":1329,"featured":6,"template":701},"gitops-done-3-ways","content:en-us:blog:gitops-done-3-ways.yml","Gitops Done 3 Ways","en-us/blog/gitops-done-3-ways.yml","en-us/blog/gitops-done-3-ways",{"_path":1335,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1336,"content":1342,"config":1348,"_id":1350,"_type":14,"title":1351,"_source":16,"_file":1352,"_stem":1353,"_extension":19},"/en-us/blog/hosting-vuejs-apps-using-gitlab-pages",{"title":1337,"description":1338,"ogTitle":1337,"ogDescription":1338,"noIndex":6,"ogImage":1339,"ogUrl":1340,"ogSiteName":686,"ogType":687,"canonicalUrls":1340,"schema":1341},"How to host VueJS apps using GitLab Pages","Follow this tutorial, including detailed configuration guidance, to quickly get your application up and running for free.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683489/Blog/Hero%20Images/hosting.png","https://about.gitlab.com/blog/hosting-vuejs-apps-using-gitlab-pages","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to host VueJS apps using GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sophia Manicor\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2023-09-13\",\n      }",{"title":1337,"description":1338,"authors":1343,"heroImage":1339,"date":1345,"body":1346,"category":741,"tags":1347},[1344,784],"Sophia Manicor","2023-09-13","\nIf you use VueJS to build websites, then you can host your website for free with GitLab Pages. 
This short tutorial walks you through a simple way to host and deploy your VueJS applications using GitLab CI/CD and GitLab Pages.\n\n## Prequisites\n- A VueJS application\n- Working knowledge of GitLab CI\n- 5 minutes\n\n## Setting up your VueJS application\n\n1) Install vue-cli.\n\n```bash\nnpm install -g @vue/cli\n# OR\nyarn global add @vue/cli\n```\nYou can check you have the right version of Vue with:\n\n```bash\nvue --version\n```\n\n2) Create your application using:\n\n```bash\nvue create name-of-app\n```\n\nWhen successfully completed, you will have a scaffolding of your VueJS application.\n\n## Setting up .gitlab-ci.yml for GitLab Pages\nBelow is the [GitLab CI configuration](https://gitlab.com/demos/applications/vuejs-gitlab-pages/-/blob/main/.gitlab-ci.yml) necessary to deploy to GitLab Pages. Put this file into your root project. GitLab Pages always deploys your website from a specific folder called `public`.\n\n```yaml\nimage: \"node:16-alpine\"\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script:\n    - yarn install --frozen-lockfile --check-files --non-interactive\n    - yarn build\n  artifacts:\n    paths:\n      - public\n\npages:\n  stage: deploy\n  script:\n    - echo 'Pages deployment job'\n  artifacts:\n    paths:\n      - public\n  only:\n    - main\n\n```\n\n## Vue config (vue.config.js)\nIn Vue, the artifacts are built in a folder called dist, in order for GitLab to deploy to Pages, we need to change the path of the artifacts. 
One way to do this is by changing the [Vue config file](https://gitlab.com/demos/applications/vuejs-gitlab-pages/-/blob/main/vue.config.js), `vue.config.js`.\n\n```\nconst { defineConfig } = require('@vue/cli-service')\n\nfunction publicPath () {\n  if (process.env.CI_PAGES_URL) {\n    return new URL(process.env.CI_PAGES_URL).pathname\n  } else {\n    return '/'\n  }\n}\n\nmodule.exports = defineConfig({\n  transpileDependencies: true,\n  publicPath: publicPath(),\n  outputDir: 'public'\n})\n```\n\nHere we have set `outputDir` to `public` so that GitLab will pick up the build artifacts and deploy to Pages. Another important piece when creating this configuration file is to change the `publicPath`, which is the base URL your application will be deployed at. In this case, we have create a function `publicPath()` that checks if the CI_PAGES_URL environment variable is set and returns the correct base URL.\n\n## Run GitLab CI\n\n![vuejs-gitlab-pages-pipeline](https://about.gitlab.com/images/blogimages/2023-05-11-hosting-vuejs-apps-using-gitlab-pages/vuejs-gitlab-pages-pipeline.png){: .shadow}\n\n\n## Check Pages to get your URL\n\n![gitlab-pages-domain](https://about.gitlab.com/images/blogimages/2023-05-11-hosting-vuejs-apps-using-gitlab-pages/gitlab-page-domain.png){: .shadow}\n\nVoila! You have set up a VueJS project with a fully functioning CI/CD pipeline. 
Enjoy your VueJS application hosted by GitLab Pages!\n\n## References\n- [https://cli.vuejs.org/guide/installation.html](https://cli.vuejs.org/guide/installation.html)\n- [https://cli.vuejs.org/guide/creating-a-project.html](https://cli.vuejs.org/guide/creating-a-project.html)\n- [https://gitlab.com/demos/applications/vuejs-gitlab-pages](https://gitlab.com/demos/applications/vuejs-gitlab-pages)\n\n",[109,766,721,9],{"slug":1349,"featured":6,"template":701},"hosting-vuejs-apps-using-gitlab-pages","content:en-us:blog:hosting-vuejs-apps-using-gitlab-pages.yml","Hosting Vuejs Apps Using Gitlab Pages","en-us/blog/hosting-vuejs-apps-using-gitlab-pages.yml","en-us/blog/hosting-vuejs-apps-using-gitlab-pages",{"_path":1355,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1356,"content":1362,"config":1368,"_id":1370,"_type":14,"title":1371,"_source":16,"_file":1372,"_stem":1373,"_extension":19},"/en-us/blog/how-automation-is-making-devops-pros-jobs-easier",{"title":1357,"description":1358,"ogTitle":1357,"ogDescription":1358,"noIndex":6,"ogImage":1359,"ogUrl":1360,"ogSiteName":686,"ogType":687,"canonicalUrls":1360,"schema":1361},"How automation is making DevOps pros’ jobs easier","Six ways automation in a DevSecOps platform aids security, monitoring, compliance, and CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662504/Blog/Hero%20Images/devsecops-automated-security.jpg","https://about.gitlab.com/blog/how-automation-is-making-devops-pros-jobs-easier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How automation is making DevOps pros’ jobs easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-12-12\",\n      }",{"title":1357,"description":1358,"authors":1363,"heroImage":1359,"date":1364,"body":1365,"category":1103,"tags":1366},[1100],"2022-12-12","\nAs DevOps professionals look for ways to save time, 
money, and tech muscle as they work to push better and more secure software out the door, they’re increasingly seeing the advantages of automation — and that those advantages seamlessly come with adopting an end-to-end [DevSecOps](/topics/devsecops/) platform. \n\nIn a 2022 GitLab quiz, more than 82% of respondents said automation plays a “vital” role in developing and deploying safer and faster releases. \n\nIt’s clear that DevOps professionals are realizing that automation minimizes the need for a lot of extra hands-on and time-consuming work, like backup, installation, and maintenance. It also can reduce the potential for human error and provide consistency. A DevSecOps platform, unlike a cobbled-together [DIY toolchain](https://page.gitlab.com/resources-ebook-trading-diy-devops-for-a-single-platform.html), offers many advantages, like visibility and collaboration. Another major benefit is that it offers automation for everything from alerts to [testing](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/) and monitoring.\n\n## Benefits of DevSecOps automation\n\nHere is how automation throughout the software lifecycle could help DevOps teams cut time and money spent on repetitive tasks, eliminate human errors, and streamline the whole DevOps process:\n\n1. Security – A critical benefit of migrating to a full DevSecOps platform is that software won’t simply get a security test at the end of the pipeline – an inefficient, and often costly, feedback system. When [security is shifted left](/blog/efficient-devsecops-nine-tips-shift-left/), if a vulnerability or compliance issue is introduced into the code, it’s identified almost immediately thanks to automated and consistent testing. Automation built into a DevOps platform leads to better software and reduces the time between designing new, higher-quality features and rolling them out into production. And that maximizes the overall return on software development.\n\n2. 
Compliance – With a single DevSecOps application, [compliance confirmation](/stages-devops-lifecycle/govern/) lives within the platform and is automated. That means professionals can verify the compliance of their code without leaving their workflow, removing the need for compliance managers to require developers to context switch among different point solutions in a DIY toolchain, which can lead to the loss of productivity and efficiency. \n\n3. Configuration – It’s a complicated job to set up, manage, and maintain application environments. [Automated configuration management](/stages-devops-lifecycle/configure/) is designed to handle these complex environments across servers, networks, and storage systems.\n\n4. Continuous integration (CI) – This is the step that enables the DevOps practice of iteration by committing changes to a shared source code repository early and often – often several times a day. [CI](/blog/basics-of-gitlab-ci-updated/) is all about efficiency. By automating manual work and testing code more frequently, teams can iterate faster and deploy new features with fewer bugs more often.\n\n5. Continuous delivery (CD) – This is a software development process that works in conjunction with continuous integration to automate the application release process. When [deployments are handled automatically](/blog/cd-automated-integrated/), software release [processes are low-risk, consistent, and repeatable](/blog/boring-solutions-faster-iteration/). \n\n6. Monitoring – This is a proactive, automated part of the process, focused on tracking software, infrastructure, and networks to trace status and raise alerts to problems. [Monitoring](/stages-devops-lifecycle/monitor/) increases security, reliability, and agility. 
\n\n## Automation by the numbers\n\nIn fact, the [GitLab 2022 Global DevSecOps Survey](https://learn.gitlab.com/dev-survey-22/2022-devsecops-report), which polled more than 5,000 DevSecOps professionals, showed that automation is becoming increasingly critical to all DevOps teams.\n\nThe survey found that 47% of teams report their testing is fully automated today, up from 25% last year. Another 21% plan to roll out test automation at some point in 2022, and 15% hope to do so in the next two or more years. And three-quarters of respondents told us their teams use a DevSecOps platform or plan to use one this year. \n\nWhy are they using a platform? Well, security professionals called out easier automation and more streamlined deployments.\n\n## Fewer repetitive and unnecessary tasks\n\nSo what is all of this automation enabling DevOps professionals to do? They’re able to let go of a lot of work. \n \nAccording to the DevSecOps Survey, respondents said they’ve been able to reduce a lot of repetitive tasks. For instance, they say they no longer have to do as much infrastructure “handholding” — they’re not manually testing their code, writing messy code, and ignoring code quality. \n \nWith automation, each task is performed identically and with consistency, reliability, and accuracy. This promotes speed and increases deliveries, and, ultimately, deployments. While it doesn’t remove humans from the picture, automation minimizes dependency on humans for managing recurring tasks. \n\nAnd with GitLab’s single, end-to-end DevSecOps platform, automation is a system feature and not something that has to be added in. Automation with the GitLab platform is ready to go. 
Check out the [“Ditching DIY DevOps for GitLab’s Single Platform”](https://page.gitlab.com/resources-ebook-trading-diy-devops-for-a-single-platform.html) to learn more ways a platform can help DevOps teams.\n",[765,1367,721,9],"collaboration",{"slug":1369,"featured":6,"template":701},"how-automation-is-making-devops-pros-jobs-easier","content:en-us:blog:how-automation-is-making-devops-pros-jobs-easier.yml","How Automation Is Making Devops Pros Jobs Easier","en-us/blog/how-automation-is-making-devops-pros-jobs-easier.yml","en-us/blog/how-automation-is-making-devops-pros-jobs-easier",{"_path":1375,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1376,"content":1382,"config":1387,"_id":1389,"_type":14,"title":1390,"_source":16,"_file":1391,"_stem":1392,"_extension":19},"/en-us/blog/how-to-become-more-productive-with-gitlab-ci",{"title":1377,"description":1378,"ogTitle":1377,"ogDescription":1378,"noIndex":6,"ogImage":1379,"ogUrl":1380,"ogSiteName":686,"ogType":687,"canonicalUrls":1380,"schema":1381},"How to become more productive with Gitlab CI","Explore some CI/CD strategies that can make your team more efficient and productive.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667358/Blog/Hero%20Images/gitlab-productivity.jpg","https://about.gitlab.com/blog/how-to-become-more-productive-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to become more productive with Gitlab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        \"datePublished\": \"2021-06-21\",\n      }",{"title":1377,"description":1378,"authors":1383,"heroImage":1379,"date":1384,"body":1385,"category":741,"tags":1386},[976],"2021-06-21","CI/CD pipelines are the preeminent solution to mitigate potential risks\nwhile integrating code changes into the repository. 
CI/CD pipelines help\nisolate the impact of potential errors, making it easier to fix them. Top\nthat with a tool that provides effective visibility into the running tasks\nand there you have a recipe for success.\n\n\nSince the primary purpose of CI/CD pipelines is to speed up the development\nprocess and provide value to the end user faster, there's always room to\nmake the process more efficient. This blog post unpacks some strategies that\ncan help you get the most out of your pipeline definition in [GitLab\nCI](/solutions/continuous-integration/).\n\n\n## How Directed Acyclic Graphs (DAG) enable concurrent pipelines\n\n\n![By using Needs keyword you can define dependencies for jobs that need to\nbe used from previous\nstages.](https://about.gitlab.com/images/blogimages/dag-explained.jpeg)\n\nBy using the \"Needs\" keyword you can define dependencies for jobs that need\nto be used from previous stages.\n\n{: .note.text-center}\n\n\nIn a\n[basic-pipeline](https://docs.gitlab.com/ee/ci/pipelines/pipeline_architectures.html#basic-pipelines)\nstructure, all the jobs in a particular stage run concurrently and the jobs\nin the subsequent stage have to wait on those to finish to get started. This\ncontinues for all the stages.\n\n\nIn the image above, the first job in the second stage only depends on the\nfirst two job in the first stage to get started. But with the basic pipeline\norder in place, it has to wait for all three jobs in the first stage to\ncomplete before it can start executing, which slows down the overall\npipeline considerably. However, by using `needs:` keywords, you can define a\ndirect dependency for the jobs and they would only have to wait on the job\nthey depend on to get started. 
By using the [DAG\nstrategy](https://docs.gitlab.com/ee/ci/directed_acyclic_graph/), you could\nshed out a few minutes from the processes for a certain project, thereby\nincreasing the pipeline execution speed and bringing down the CI minutes\nconsumption.\n\n\nBy using `needs: []` you can make the job in any stage run immediately, as\nit doesn't have to wait on any other job to finish.\n\n\n## Why parallel jobs increase productivity\n\n\nNot all the jobs in a pipeline have an equal run-time. While some may take\njust a few seconds, some take much longer to finish. When there are many\nteam members waiting on a running pipeline to finish to be able to make a\ncontribution to the project, the productivity of the team takes a hit.\n\n\nGitLab provides a method to make clones of a job and run them in parallel\nfor faster execution using the `parallel:` keyword. While [parallel\njobs](https://docs.gitlab.com/ee/ci/yaml/#parallel) may not help in reducing\nthe consumption of [CI minutes](/pricing/faq-compute-minutes/), they\ndefinitely help increase work productivity.\n\n\n## Break down big pipelines with parallel matrix Jobs\n\n\nBefore the release of [parallel matrix\njobs](https://docs.gitlab.com/ee/ci/yaml/#parallel-matrix-jobs), in order to\nrun multiple instances of a job with different variable values, the jobs had\nto be manually defined in the `.gitlab-ci-yml` like this:\n\n\n```yaml\n\n.run-test:\n  script: run-test $PLATFORM\n  stage: test\n\ntest-win:\n  extends: .run-test\n  variables:\n    - PLATFORM: windows\ntest-mac:\n  extends: .run-test\n  variables:\n    - PLATFORM: mac\ntest-linux:\n  extends: .run-test\n  variables:\n    - PLATFORM: linux\n```\n\n\nParallel matrix jobs were released with GitLab 13.3 and allow you to create\njobs at runtime based on specified variables. 
Let's say there is a need to\nrun multiple instances a job with different variables values for each\ninstance — with a combination of `parallel:` and `matrix:` you accomplish\njust that.\n\n\n```yaml\n\ntest:\n  stage: test\n  script: run-test $PLATFORM\n  parallel:\n    matrix:\n      - PLATFORM: [windows, mac, linux]\n```\n\n\nBy using `parallel:` and `matrix:`, big pipelines can be broken down into\nmanageable parts for efficient maintainance.\n\n\n## Reduce the risk of merge conflicts with parent/child pipelines\n\n\n![Parent-child pipelines can include external YAML files in you\nconfiguration](https://about.gitlab.com/images/blogimages/parent-child-explained.jpeg)\n\nThe parent pipeline generates a child pipeline via the trigger:include\nkeywords.\n\n{: .note.text-center}\n\n\nFor better management of dependencies, many organizations prefer a mono-repo\nsetup for their projects. But mono-repos have a flip side too. If a\nrepository hosts a large number of projects and a single pipeline definition\nis used to trigger different automated processes for different components,\nthe pipeline performance is negatively affected. By using [parent-child\npipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html)\nyou can design more efficient pipelines, since you can have multiple\nchild-pipelines that run in parallel. The keyword `include:` is used to\ninclude external YAML files in your CI/CD configuration for this purpose. In\nthe image above a pipeline (the parent) generates a child pipeline via the\ntrigger:include keywords.\n\n\nThis approach also reduces the chances of merge conflicts from happening, as\nit allows to only edit a section of the pipeline if necessary.\n\n\n## Merge trains help the target branch stay stable\n\n\nWhen there's a lot of merge requests flowing into a project, there is a risk\nof merge conflicts. 
[Merge\ntrains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) is a\npowerful feature by GitLab that allows users to automatically merge a series\nof (queued) merge requests without breaking the target branch. Using this\nfeature, you can add an MR to the train, and it would take care of it until\nit is merged.\n\n\n## Use multiple caches in the same job\n\n\nStarting 13.11, GitLab CI/CD provides the ability to [configure multiple\ncache keys in a single\njob](/releases/2021/04/22/gitlab-13-11-released/#use-multiple-caches-in-the-same-job)\nwhich will help you increase your pipeline performance. This functionality\ncould help you save precious development time when the jobs are running.\n\n\n## How can an efficient pipeline save you money?\n\n\nBy using CI/CD strategies that ensure safe merging of new changes and a\ngreen master, organizations can worry less about unanticipated downtimes\ncaused by infrastructural failures and code conflicts.\n\n\nWith faster pipelines, developers end up spending lesser time in maintenance\nand find time and space to bring in more thoughtfulness and creativity in\ntheir work, leading to improvements in code quality and the company\natmosphere and morale.\n\n\nIf you are looking to bring down the cost of running your CI/CD pipelines\nfor a large project, look up the [Artifact and cache\nsettings](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#artifact-and-cache-settings)\nand [Optimizing GitLab for large\nrepositories](https://docs.gitlab.com/ee/ci/large_repositories/) sections in\nthe documentation.\n",[721,9,917,853],{"slug":1388,"featured":6,"template":701},"how-to-become-more-productive-with-gitlab-ci","content:en-us:blog:how-to-become-more-productive-with-gitlab-ci.yml","How To Become More Productive With Gitlab 
Ci","en-us/blog/how-to-become-more-productive-with-gitlab-ci.yml","en-us/blog/how-to-become-more-productive-with-gitlab-ci",{"_path":1394,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1395,"content":1401,"config":1407,"_id":1409,"_type":14,"title":1410,"_source":16,"_file":1411,"_stem":1412,"_extension":19},"/en-us/blog/how-to-bring-devops-to-the-database-with-gitlab-and-liquibase",{"title":1396,"description":1397,"ogTitle":1396,"ogDescription":1397,"noIndex":6,"ogImage":1398,"ogUrl":1399,"ogSiteName":686,"ogType":687,"canonicalUrls":1399,"schema":1400},"How to bring DevOps to the database with GitLab and Liquibase","Learn how to build a continuous delivery pipeline for database code changes with this tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672677/Blog/Hero%20Images/metalgears_databasecasestudy.jpg","https://about.gitlab.com/blog/how-to-bring-devops-to-the-database-with-gitlab-and-liquibase","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to bring DevOps to the database with GitLab and Liquibase\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tsvi Zandany\"}],\n        \"datePublished\": \"2022-01-05\",\n      }",{"title":1396,"description":1397,"authors":1402,"heroImage":1398,"date":1404,"body":1405,"category":741,"tags":1406},[1403],"Tsvi Zandany","2022-01-05","In the [Accelerate State of DevOps 2021\nReport](https://cloud.google.com/devops/state-of-devops/), the DevOps\nResearch and Assessment (DORA) team reveals “elite DevOps performers are 3.4\ntimes more likely to exercise database change management compared to their\nlow-performing counterparts.” Tracking changes with version control is not\njust for application code, though. It’s crucial for managing changes for one\nof your most important assets: your database.   
\n\n\nThe GitLab DevOps platform enables database management teams to leverage\nCI/CD to track, manage, and deploy database changes, along with application\ndevelopment and automation and infrastructure as code. Database change\nmanagement tools have become more advanced in recent years, supporting\neasier collaboration and communication, which are the keys to successful\nDevOps. In this blog post, I’ll take you through a tutorial using\n[Liquibase](https://www.liquibase.com), a tool that integrates seamlessly\ninto the GitLab DevOps platform so your teams can deliver database code\nchanges as fast as application code changes (without compromising on quality\nand security). \n\n\n## What is Liquibase?\n\n\nLiquibase was founded as an open source project over 15 years ago to address\ngetting database changes into version control. With more than 75 million\ndownloads, the company behind Liquibase expanded to paid editions and\nsupport to help teams release software faster and safer by bringing the\ndatabase change process into their existing CI/CD automation.  \n\n\nIntegrating Liquibase with GitLab CI/CD enables database teams to leverage\nDevOps automation and best practices for database management. Liquibase\nhelps teams build automated database scripts and gain insights into when,\nwhere, and how database changes are deployed. In this tutorial, we’ll\ndemonstrate how to check database scripts for security and compliance\nissues, speed up database code reviews, perform easy rollbacks, and provide\ndatabase snapshots to check for malware.\n\n\n## Adding Liquibase to GitLab’s DevOps Platform\n\n\nTeams can add Liquibase to GitLab to enable true CI/CD for the database.\nIt’s easy to integrate Liquibase into your GitLab CI/CD pipeline. 
Before\njumping into the tutorial, let’s take a look at the [example Liquibase\nGitLab project\nrepository](https://gitlab.com/gitlab-com/alliances/liquibase/sandbox-projects/sql_server)\nyou’ll be using.\n\n\n### Understanding the example Liquibase GitLab project repository\n\n\n![A CI/CD pipeline\ndiagram](https://about.gitlab.com/images/blogimages/1_CICD_Pipeline_Diagram.png){:\n.shadow.small.center}\n\n\nFor this example, the GitLab CI/CD pipeline environments include DEV, QA,\nand PROD. This pipeline goes through several stages: build, test, deploy,\nand compare. A post stage comes into play later to capture a snapshot of\nyour database in Production.\n\n\nStages:\n  - build\n  - test\n  - deploy\n  - compare\n\n### Liquibase commands in the pipeline\n\n\nFor each of the predefined jobs in the GitLab repository, you’ll be using\nseveral Liquibase commands to help manage database changes quickly and\nsafely:\n\n\n- liquibase_job:\n\n  before_script:\n    - functions\n    - isUpToDate\n    - liquibase checks run\n    - liquibase updateSQL\n    - liquibase update\n    - liquibase rollbackOneUpdate --force\n    - liquibase tag $CI_PIPELINE_ID\n    - liquibase --logFile=${CI_JOB_NAME}_${CI_PIPELINE_ID}.log --logLevel=info update\n    - liquibase history\n\n  script:\n    - echo \"Comparing databases DEV --> QA\"\n    - liquibase diff\n    - liquibase --outputFile=diff_between_DEV_QA.json diff --format=json\n\n  script:\n    - echo \"Snapshotting database PROD\"\n    - liquibase --outputFile=snapshot_PROD.json snapshot --snapshotFormat=json\n\nLearn more about each of these commands in the [README file in the GitLab\nrepository](https://gitlab.com/gitlab-com/alliances/liquibase/sandbox-projects/sql_server/-/blob/main/README.md). \n\n\n## Tutorial\n\n\nThe following tutorial demonstrates how to run Liquibase in a GitLab CI/CD\npipeline. 
Follow along by watching this companion video:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ZBFhDayoRYo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n### Prerequisites\n\n\nTo start, I’m using a Linux machine with the following:\n\n\n- [A GitLab account](https://www.gitlab.com)\n\n- Self-managed Runner on a Linux machine\n\n- Git\n\n- Java 11\n\n- Access to a SQL Server database with multiple environments\n\n\n### Download, install, and configure Liquibase\n\n\n[Download Liquibase v4.6.1+](https://www.liquibase.org/download)\n\n\n[Install\nLiquibase](https://docs.liquibase.com/concepts/installation/installation-linux-unix-mac.html)\n\n\n[Get a free Liquibase Pro license key](https://www.liquibase.com/trial). No\ncredit card is required, so you can play with all the advanced features and\nget support for 30 days. You’ll use this key later when you configure\nenvironment variables within GitLab.\n\n\nEnsure Liquibase is installed properly by running the liquibase --version\ncommand. 
If everything is good you’ll see the following:\n\n\nStarting Liquibase at 18:10:06 (version 4.6.1 #98 built at 2021-11-04\n20:16+0000)\n\nRunning Java under /usr/lib/jvm/java-11-openjdk-11.0.13.0.8-1.el7_9.x86_64\n(Version 11.0.13)\n\n\nLiquibase Version: 4.6.1\n\nLiquibase Community 4.6.1 by Liquibase\n\n\n### Prepare your GitLab project\n\n\nFork this [example GitLab project\nrepository](https://gitlab.com/gitlab-com/alliances/liquibase/sandbox-projects/sql_server).\n([See more information about forking a\nrepository](https://docs.gitlab.com/ee/user/project/repository/forking_workflow.html).)\n\n\n[Create a self-managed GitLab Runner](https://docs.gitlab.com/runner/) on\nyour Linux instance with your newly forked GitLab project.\n\n\nClone your newly forked project repository:\n\ngit clone https://gitlab.com/\u003Cusername>/sql_server.git\n\n\nGo to the “sql_server” project folder.\n\ncd sql_server\n\n\nRun the following command to change your git branch to staging:\n\ngit checkout staging\n\n\nConfigure the GitLab CI/CD pipeline environment variables.\n\n\nYour configuration will include [CI/CD\nvariables](https://docs.gitlab.com/ee/ci/variables/#add-a-cicd-variable-to-a-project),\n[Liquibase\nproperties](https://www.liquibase.com/blog/secure-database-developer-flow-using-gitlab-pipelines),\ndatabase credentials, and the Liquibase Pro trial license key so you can use\nall the advanced Liquibase commands.\n\n\nFrom the main sql_server project, go to Settings → CI/CD\n\n\nUnder Variables, click Expand and add the following variables:\n\n\n![A CI/CD pipeline\ndiagram](https://about.gitlab.com/images/blogimages/liquibasevariables.png){:\n.shadow.small.center}\n\n\n![A CI/CD pipeline\ndiagram](https://about.gitlab.com/images/blogimages/liquibasevariables2.png){:\n.shadow.small.center}\n\n\n### Configure the self-managed GitLab runner\n\n\nFrom the main sql_server project, go to Settings → CI/CD\n\n\nExpand the runners section, click the pencil edit icon, and add 
the\nfollowing runner tags (comma separated):\n\n\ndev_db,prod_db,test_db\n\n\nNote: Tags are created to help choose which runner will do the job. In this\nexample, we are associating all tags to one runner. Learn more about\n[configuring\nrunners](https://docs.gitlab.com/ee/ci/runners/configure_runners.html). \n\n\n### Make changes to the database\n\n\nEdit the changelog.sql file and add the following changeset after \n\n\n```\n\nliquibase formatted sql:\n\n-- changeset SteveZ:createTable_salesTableZ\n\nCREATE TABLE salesTableZ (\n   ID int NOT NULL,\n   NAME varchar(20) COLLATE SQL_Latin1_General_CP1_CI_AS NULL,\n   REGION varchar(20) COLLATE SQL_Latin1_General_CP1_CI_AS NULL,\n   MARKET varchar(20) COLLATE SQL_Latin1_General_CP1_CI_AS NULL\n)\n\n--rollback DROP TABLE salesTableZ\n\nAdd, commit, and push all new database changes.\n\ngit add changelog.sql\n\ngit commit -m “added changelog id and a create table salesTableZ changeset”\n\ngit push -u origin staging\n\n```\n\n\n### Merge the changes and run the pipeline\n\n\nLet’s merge the changes from branch staging → main to trigger the pipeline\nto run all jobs.\n\n\nClick Merge requests → New merge request\n\n\nSelect staging as Source branch and main as Target branch\n\n\nClick Compare branches and continue\n\n\nOn the next screen, click Create merge request\n\n\nClick Merge to finish merging the changes\n\n\n![A look at the merge\nrequest](https://about.gitlab.com/images/blogimages/2_Merge_Request1.png){:\n.shadow.small.center}\n\n\n![Another look at the merge\nrequestt](https://about.gitlab.com/images/blogimages/3_Merge_Request2.png){:\n.shadow.small.center}\n\n\nOnce these steps are completed, the code is merged into main and the\npipeline is triggered to run.\n\n\n![The pipeline is\ntriggered](https://about.gitlab.com/images/blogimages/4_Merge_Request3.png){:\n.shadow.small.center}\n\n\nTo see the pipeline running, click Pipelines.\n\n\nTo view the pipeline progress, click the pipeline ID link. 
You can view each\njob’s log output by clicking on each job name.\n\n\n![The pipeline in\nprogress](https://about.gitlab.com/images/blogimages/5_Pipeline_Progress.png){:\n.shadow.small.center}\n\n\nClicking into the build-job example:\n\n\nThe liquibase checks run command validates the SQL for any violations.\n\n\n```\n\n57Starting Liquibase at 22:19:14 (version 4.6.1 #98 built at 2021-11-04\n20:16+0000)\n\n58Liquibase Version: 4.6.1\n\n59Liquibase Pro 4.6.1 by Liquibase licensed to customersuccess until Mon Jun\n27 04:59:59 UTC 2022\n\n60Executing Quality Checks against changelog.sql\n\n61Executing all checks because a valid Liquibase Pro license was found!\n\n62Changesets Validated:\n\n63  ID: createTable_salesTableZ; Author: SteveZ; File path: changelog.sql\n\n64Checks run against each changeset:\n\n65  Warn on Detection of 'GRANT' Statements\n\n66  Warn on Detection of 'REVOKE' Statements\n\n67  Warn when 'DROP TABLE' detected\n\n68  Warn when 'DROP COLUMN' detected\n\n69  Check for specific patterns in sql (Short Name: SqlCreateRoleCheck)\n\n70  Warn when 'TRUNCATE TABLE' detected\n\n71  Warn on Detection of grant that contains 'WITH ADMIN OPTION'\n\n72Liquibase command 'checks run' was executed successfully.\n\n```\n\n\nThe liquibase update command deploys the changes. 
If you choose, you can\nview a full report of your changes in [Liquibase\nHub](https://docs.liquibase.com/tools-integrations/liquibase-hub/setup.html).\nThe update command also saves the deployment log output file as an artifact.\n\n\n```\n\n227Starting Liquibase at 22:19:34 (version 4.6.1 #98 built at 2021-11-04\n20:16+0000)\n\n228Liquibase Version: 4.6.1\n\n229Liquibase Pro 4.6.1 by Liquibase licensed to customersuccess until Mon\nJun 27 04:59:59 UTC 2022\n\n230----------------------------------------------------------------------\n\n231View a report of this operation at https://hub.liquibase.com/r/I7ens13ooM\n\n232* IMPORTANT: New users of Hub first need to Sign In to your account\n\n233with the one-time password sent to your email, which also serves as\n\n234your username.\n\n235----------------------------------------------------------------------\n\n236Logs saved to\n/home/gitlab-runner/builds/3-UvD4aX/0/szandany/sql_server/build-job_405710044.log\n\n237Liquibase command 'update' was executed successfully.\n\n```\n\n\nHere’s what your Liquibase Hub report will look like:\n\n\n![The hub report, part\none](https://about.gitlab.com/images/blogimages/6_LiquibaseHub_Report.png){:\n.shadow.small.center}\n\n\n![The hub report, part\ntwot](https://about.gitlab.com/images/blogimages/7_LiquibaseHub_Report.png){:\n.shadow.small.center}\n\n\nThe Liquibase history command will show what changes are currently in the\ndatabase.\n\n\n```\n\n255Starting Liquibase at 22:19:40 (version 4.6.1 #98 built at 2021-11-04\n20:16+0000)\n\n256Liquibase Version: 4.6.1\n\n257Liquibase Pro 4.6.1 by Liquibase licensed to customersuccess until Mon\nJun 27 04:59:59 UTC 2022\n\n258Liquibase History 
for\njdbc:sqlserver://localhost:1433;sendTemporalDataTypesAsStringForBulkCopy=true;delayLoadingLobs=true;useFmtOnly=false;useBulkCopyForBatchInsert=false;cancelQueryTimeout=-1;sslProtocol=TLS;jaasConfigurationName=SQLJDBCDriver;statementPoolingCacheSize=0;serverPreparedStatementDiscardThreshold=10;enablePrepareOnFirstPreparedStatementCall=false;fips=false;socketTimeout=0;authentication=NotSpecified;authenticationScheme=nativeAuthentication;xopenStates=false;sendTimeAsDatetime=true;trustStoreType=JKS;trustServerCertificate=false;TransparentNetworkIPResolution=true;serverNameAsACE=false;sendStringParametersAsUnicode=true;selectMethod=direct;responseBuffering=adaptive;queryTimeout=-1;packetSize=8000;multiSubnetFailover=false;loginTimeout=15;lockTimeout=-1;lastUpdateCount=true;encrypt=false;disableStatementPooling=true;databaseName=DEV;columnEncryptionSetting=Disabled;applicationName=Microsoft\nJDBC Driver for SQL Server;applicationIntent=readwrite;\n\n259- Database updated at 11/9/21, 10:19 PM. Applied 1 changeset(s),\nDeploymentId: 6496372605\n\n260  liquibase-internal::1636496372758::liquibase\n\n261- Database updated at 11/9/21, 10:19 PM. Applied 1 changeset(s),\nDeploymentId: 6496375151\n\n262  changelog.sql::createTable_salesTableZ::SteveZ\n\n263Liquibase command 'history' was executed successfully.\n\n```\n\n\n### Clicking into the DEV->QA job example from your pipeline\n\n\nWe run the liquibase diff command to compare the DEV and QA databases. This\nhelps detect any drift between the databases.\n\n\nNotice in the log output that there are some unexpected changes: \n\n\ntable named bad_table\n\n\nprocedure named bad_proc\n\n\n![The diff\nreport](https://about.gitlab.com/images/blogimages/8_LiquibaseDiff_Report.png){:\n.shadow.small.center}\n\n\nBy using the [Liquibase Pro trial license\nkey](https://www.liquibase.com/trial), you’re able to detect any stored\nlogic objects included in the diff report. 
Liquibase Pro also allows you to\ngenerate a parsable JSON output file and save it as an artifact for later\nuse.\n\n\n```\n\n137Starting Liquibase at 22:21:10 (version 4.6.1 #98 built at 2021-11-04\n20:16+0000)\n\n138Liquibase Version: 4.6.1\n\n139Liquibase Pro 4.6.1 by Liquibase licensed to customersuccess until Mon\nJun 27 04:59:59 UTC 2022\n\n140Output saved to\n/home/gitlab-runner/builds/3-UvD4aX/0/szandany/sql_server/diff_between_DEV_QA.json\n\n141Liquibase command 'diff' was executed successfully.\n\n```\n\n\nJSON artifact output file example:\n\n\n```\n\n{\n    \"diff\": {\n        \"diffFormat\": 1,\n        \"created\": \"Wed Dec 08 20:16:53 UTC 2021\",\n        \"databases\": {\n            \"reference\": {\n                \"majorVersion\": \"14\",\n                \"minorVersion\": \"00\",\n                \"name\": \"Microsoft SQL Server\",\n                \"url\": \"jdbc:sqlserver://localhost:1433;databaseName=DEV; ...\"\n            },\n            \"target\": {\n                \"majorVersion\": \"14\",\n                \"minorVersion\": \"00\",\n                \"name\": \"Microsoft SQL Server\",\n                \"url\": \"jdbc:sqlserver://localhost:1433;databaseName=QA; ...\"\n            }\n        },\n        \"unexpectedObjects\": [\n            {\n                \"unexpectedObject\": {\n                    \"name\": \"bad_proc\",\n                    \"type\": \"storedProcedure\",\n                    \"schemaName\": \"dbo\",\n                    \"catalogName\": \"QA\"\n                }\n            },\n            {\n                \"unexpectedObject\": {\n                    \"name\": \"bad_table\",\n                    \"type\": \"table\",\n                    \"schemaName\": \"dbo\",\n                    \"catalogName\": \"QA\"\n                }\n            },\n            {\n                \"unexpectedObject\": {\n                    \"name\": \"MARKET\",\n                    \"type\": \"column\",\n                    
\"relationName\": \"bad_table\",\n                    \"schemaName\": \"dbo\",\n                    \"catalogName\": \"QA\"\n                }\n            },\n            {\n                \"unexpectedObject\": {\n                    \"name\": \"ID\",\n                    \"type\": \"column\",\n                    \"relationName\": \"bad_table\",\n                    \"schemaName\": \"dbo\",\n                    \"catalogName\": \"QA\"\n                }\n            },\n            {\n                \"unexpectedObject\": {\n                    \"name\": \"NAME\",\n                    \"type\": \"column\",\n                    \"relationName\": \"bad_table\",\n                    \"schemaName\": \"dbo\",\n                    \"catalogName\": \"QA\"\n                }\n            },\n            {\n                \"unexpectedObject\": {\n                    \"name\": \"REGION\",\n                    \"type\": \"column\",\n                    \"relationName\": \"bad_table\",\n                    \"schemaName\": \"dbo\",\n                    \"catalogName\": \"QA\"\n                }\n            }\n        ],\n        \"changedObjects\": [\n            {\n                \"changedObject\": {\n                    \"name\": \"QA\",\n                    \"type\": \"catalog\",\n                    \"differences\": [\n                        {\n                            \"difference\": {\n                                \"comparedValue\": \"QA\",\n                                \"field\": \"name\",\n                                \"message\": \"name changed from 'DEV' to 'QA'\",\n                                \"referenceValue\": \"DEV\"\n                            }\n                        }\n                    ]\n                }\n            }\n        ]\n    }\n}\n\n\n```\n\n\nNote that the [Liquibase\ndiffChangelog](https://docs.liquibase.com/commands/diffchangelog.html) can\nhelp any baseline environments that have drifted. 
\n\n\nClicking into the snapshot PROD job example, the snapshot file contains all\nthe current schema changes represented in a JSON file. You can obtain the\nPROD database snapshot file to compare two states of the same database to\nprotect against malware with drift detection.\n\n\n```\n\n58Starting Liquibase at 22:21:32 (version 4.6.1 #98 built at 2021-11-04\n20:16+0000)\n\n59Liquibase Version: 4.6.1\n\n60Liquibase Pro 4.6.1 by Liquibase licensed to customersuccess until Mon Jun\n27 04:59:59 UTC 2022\n\n61Output saved to\n/home/gitlab-runner/builds/3-UvD4aX/0/szandany/sql_server/snapshot_PROD.json\n\n62Liquibase command 'snapshot' was executed successfully. \n\n64Uploading artifacts for successful job00:01\n\n70Cleaning up project directory and file based variables00:00\n\n72Job succeeded\n\n```\n\n\n### Congratulations! The pipeline ran successfully.\n\n\nIf all the jobs are successful, you’ll see a green checkmark right next to\neach one.\n\n\nHere’s what your database changes will look like with a database SQL query\ntool.\n\n\n![The\ndatabase](https://about.gitlab.com/images/blogimages/9_Database_Changes_SQL_Query_Tool.png){:\n.shadow.small.center}\n\n\n## Summing it up\n\n\nYou’ve now successfully run Liquibase in a GitLab pipeline to enable true\nCI/CD for the database. You can easily keep adding more changes to the\ndatabase by adding more Liquibase changesets to the changelog, commit them\nto GitLab version control, and repeat the merge request process described in\nthis tutorial to add the changes. \n\n\nStill have questions or want support integrating Liquibase with your Gitlab\nCI/CD Pipeline? Our team of database DevOps experts is happy to help! 
\n\n\n[Contact Liquibase](https://www.liquibase.com/contact)\n\n\n[Contact GitLab](/sales/)\n\n\nContact a [certified GitLab channel\npartner](https://www.google.com/url?q=https://partners.gitlab.com/English/directory/&sa=D&source=docs&ust=1641393355697069&usg=AOvVaw0R5mPukwMBR2dKsn3eQzqp)\n\n\nContact a [Liquibase channel partner](https://www.liquibase.com/partners)\n\n\nOther useful links: \n\n\n[Gitlab CI/CD setup Liquibase\ndocumentation](https://docs.liquibase.com/concepts/installation/setup-gitlab-cicd.html)\n\n\n[GitLab - Liquibase\nrepository](https://gitlab.com/gitlab-com/alliances/liquibase/sandbox-projects/liquibasegitlabcicd/-/blob/master/README.md) \n\n\nGet a [speedy, secure database developer\nflow](https://www.liquibase.com/blog/secure-database-developer-flow-using-gitlab-pipelines)\nusing GitLab pipelines & Liquibase\n\n\n_Author Tsvi Zandany is a Senior Solutions Architect at Liquibase_\n",[233,721,9],{"slug":1408,"featured":6,"template":701},"how-to-bring-devops-to-the-database-with-gitlab-and-liquibase","content:en-us:blog:how-to-bring-devops-to-the-database-with-gitlab-and-liquibase.yml","How To Bring Devops To The Database With Gitlab And Liquibase","en-us/blog/how-to-bring-devops-to-the-database-with-gitlab-and-liquibase.yml","en-us/blog/how-to-bring-devops-to-the-database-with-gitlab-and-liquibase",{"_path":1414,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1415,"content":1421,"config":1429,"_id":1431,"_type":14,"title":1432,"_source":16,"_file":1433,"_stem":1434,"_extension":19},"/en-us/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab",{"title":1416,"description":1417,"ogTitle":1416,"ogDescription":1417,"noIndex":6,"ogImage":1418,"ogUrl":1419,"ogSiteName":686,"ogType":687,"canonicalUrls":1419,"schema":1420},"CI/CD pipeline: GitLab & Helm for Kubernetes Auto Deploy","One user walks through how he tried GitLab caching and split the job into multiple steps to get better 
feedback.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664472/Blog/Hero%20Images/gitlabflatlogomap.png","https://about.gitlab.com/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to create a CI/CD pipeline with Auto Deploy to Kubernetes using GitLab and Helm\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sergey Nuzhdin\"}],\n        \"datePublished\": \"2017-09-21\",\n      }",{"title":1422,"description":1417,"authors":1423,"heroImage":1418,"date":1425,"body":1426,"category":741,"tags":1427},"How to create a CI/CD pipeline with Auto Deploy to Kubernetes using GitLab and Helm",[1424],"Sergey Nuzhdin","2017-09-21","Recently, I started working on a few Golang\n[microservices](/topics/microservices/). I decided to try GitLab’s caching\nand split the job into multiple steps for better feedback in the UI.\n\n\n\u003C!-- more -->\n\n\nSince my previous\nposts[[1](http://blog.lwolf.org/post/how-to-build-tiny-golang-docker-images-with-gitlab-ci/)][[2](http://blog.lwolf.org/post/continuous-deployment-to-kubernetes-from-gitlab-ci/)]\nabout [CI/CD](/topics/ci-cd/), a lot has changed. I started using Helm\ncharts for packaging applications, and stopped using docker-in-docker in\ngitlab-runner.\n\n\nHere are a few of the main changes to my `.gitlab-ci.yml` file since my\nprevious post:\n\n\n* no docker-in-docker\n\n* using cache for packages instead of a prebuilt image with dependencies\n\n* splitting everything into multiple steps\n\n* autodeploy to staging environment using Helm, a package manager for\nKubernetes\n\n\n### Building Golang image\n\n\nSince Golang is very strict about the location of the project, we need to\nmake some adjustments to the CI job. This is done in the `before_script`\nblock. 
Simply create needed directories and link source code in there.\nAssuming that the official repository of the project is\n`gitlab.example.com/librerio/libr_files` it should look like this.\n\n\n```\n\nvariables:\n  APP_PATH: /go/src/gitlab.example.com/librerio/libr_files\n\nbefore_script:\n  - mkdir -p /go/src/gitlab.example.com/librerio/\n  - ln -s $PWD ${APP_PATH}\n  - mkdir -p ${APP_PATH}/vendor\n  - cd ${APP_PATH}\n```\n\n\nWith this in place, we can install dependencies and build our binaries. To\navoid the download of all packages on each build we need to configure\ncaching. Due to the strange caching rules of GitLab, we need to add vendor\ndirectory to both cache and artifacts. Cache will give us an ability to use\nit between build jobs and artifacts will allow us to use it inside the same\njob.\n\n\n```\n\n\ncache:\n  untracked: true\n  key: \"$CI_BUILD_REF_NAME\"\n  paths:\n    - vendor/\n\nsetup:\n  stage: setup\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - glide install -v\n  artifacts:\n    paths:\n     - vendor/\n\n```\n\n\nBuild step didn’t change, it’s still about building the binary. I add binary\nto artifacts.\n\n\n```\n\nbuild:\n  stage: build\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - cd ${APP_PATH}\n    - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o release/app -ldflags '-w -s'\n    - cd release\n  artifacts:\n    paths:\n     - release/\n```\n\n\n###  Test stage\n\n\nTo run golang tests with coverage reports I’m using the variation of [this\nshell\nscript](https://github.com/mlafeldt/chef-runner/blob/v0.7.0/script/coverage).\nIt runs all tests in project subdirectories and creates a [coverage\nreport](/blog/publish-code-coverage-report-with-gitlab-pages/). I changed it\na bit before putting into a gist. 
I exclude vendor directory from tests.\n\n\n* coverage regexp for gitlab-ci: `^total:\\s*\\(statements\\)\\s*(\\d+.\\d+\\%)`\n\n\n### Deploy stage\n\n\nI don’t use native GitLab’s integration with Kubernetes.\n\n\nFirst I thought about creating Kubernetes secrets and mounting it to the\ngitlab-runner pod. But it’s very complicated. You need to upgrade deployment\nevery time you want to add new Kubernetes cluster configurations. So I’m\nusing GitLab’s CI/CD variables with base64 encoded Kubernetes config. Each\nproject can have any number of configurations. The process is easy – create\nbase64 string from the configuration file and copy it to the clipboard.\nAfter this, put it into `kube_config` variable (name it whatever you like).\n\n\n`cat ~/.kube/config | base64 | pbcopy`\n\n\nIf you do not own a full GitLab installation, consider creating a Kubernetes\nuser with restricted permissions.\n\n\nThen on the deploy stage, we can decode this variable back into the file and\nuse it with kubectl.\n\n\n```\n\nvariables:\n  KUBECONFIG: /etc/deploy/config\n\ndeploy:\n  ...\n  before_script:\n    - mkdir -p /etc/deploy\n    - echo ${kube_config} | base64 -d > ${KUBECONFIG}\n    - kubectl config use-context homekube\n    - helm init --client-only\n    - helm repo add stable https://kubernetes-charts.storage.googleapis.com/\n    - helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/\n    - helm repo update\n```\n\n\nDeploy stage also covers the case when you have several versions of the same\napplication.\n\n\nFor example, you have two versions of API: v1.0 and v1.1. All you need to do\nis set `appVersion` in Chart.yaml file. 
Build system will check API version\nand either deploy or upgrade needed release.\n\n\n```\n\n- export API_VERSION=\"$(grep \"appVersion\" Chart.yaml | cut -d\" \" -f2)\"\n\n- export RELEASE_NAME=\"libr-files-v${API_VERSION/./-}\"\n\n- export DEPLOYS=$(helm ls | grep $RELEASE_NAME | wc -l)\n\n- if [ ${DEPLOYS}  -eq 0 ]; then helm install --name=${RELEASE_NAME} .\n--namespace=${STAGING_NAMESPACE}; else helm upgrade ${RELEASE_NAME} .\n--namespace=${STAGING_NAMESPACE}; fi\n\n```\n\n\n### tl;dr\n\n\n```\n\nHere is complete `.gitlab-ci.yaml` file for reference.\n\n\ncache:\n  untracked: true\n  key: \"$CI_BUILD_REF_NAME\"\n  paths:\n    - vendor/\n\nbefore_script:\n  - mkdir -p /go/src/gitlab.example.com/librerio/\n  - ln -s $PWD ${APP_PATH}\n  - mkdir -p ${APP_PATH}/vendor\n  - cd ${APP_PATH}\n\nstages:\n  - setup\n  - test\n  - build\n  - release\n  - deploy\n\nvariables:\n  CONTAINER_IMAGE: ${CI_REGISTRY}/${CI_PROJECT_PATH}:${CI_BUILD_REF_NAME}_${CI_BUILD_REF}\n  CONTAINER_IMAGE_LATEST: ${CI_REGISTRY}/${CI_PROJECT_PATH}:latest\n  DOCKER_DRIVER: overlay2\n\n  KUBECONFIG: /etc/deploy/config\n  STAGING_NAMESPACE: app-stage\n  PRODUCTION_NAMESPACE: app-prod\n\n  APP_PATH: /go/src/gitlab.example.com/librerio/libr_files\n  POSTGRES_USER: gorma\n  POSTGRES_DB: test-${CI_BUILD_REF}\n  POSTGRES_PASSWORD: gorma\n\nsetup:\n  stage: setup\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - glide install -v\n  artifacts:\n    paths:\n     - vendor/\n\nbuild:\n  stage: build\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - cd ${APP_PATH}\n    - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o release/app -ldflags '-w -s'\n    - cd release\n  artifacts:\n    paths:\n     - release/\n\nrelease:\n  stage: release\n  image: docker:latest\n  script:\n    - cd ${APP_PATH}/release\n    - docker login -u gitlab-ci-token -p ${CI_BUILD_TOKEN} ${CI_REGISTRY}\n    - docker build -t ${CONTAINER_IMAGE} .\n    - docker tag ${CONTAINER_IMAGE} ${CONTAINER_IMAGE_LATEST}\n    - docker push 
${CONTAINER_IMAGE}\n    - docker push ${CONTAINER_IMAGE_LATEST}\n\ntest:\n  stage: test\n  image: lwolf/golang-glide:0.12.3\n  services:\n    - postgres:9.6\n  script:\n    - cd ${APP_PATH}\n    - curl -o coverage.sh https://gist.githubusercontent.com/lwolf/3764a3b6cd08387e80aa6ca3b9534b8a/raw\n    - sh coverage.sh\n\ndeploy_staging:\n  stage: deploy\n  image: lwolf/helm-kubectl-docker:v152_213\n  before_script:\n    - mkdir -p /etc/deploy\n    - echo ${kube_config} | base64 -d > ${KUBECONFIG}\n    - kubectl config use-context homekube\n    - helm init --client-only\n    - helm repo add stable https://kubernetes-charts.storage.googleapis.com/\n    - helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/\n    - helm repo update\n  script:\n    - cd deploy/libr-files\n    - helm dep build\n    - export API_VERSION=\"$(grep \"appVersion\" Chart.yaml | cut -d\" \" -f2)\"\n    - export RELEASE_NAME=\"libr-files-v${API_VERSION/./-}\"\n    - export DEPLOYS=$(helm ls | grep $RELEASE_NAME | wc -l)\n    - if [ ${DEPLOYS}  -eq 0 ]; then helm install --name=${RELEASE_NAME} . --namespace=${STAGING_NAMESPACE}; else helm upgrade ${RELEASE_NAME} . 
--namespace=${STAGING_NAMESPACE}; fi\n  environment:\n    name: staging\n    url: https://librerio.example.com\n  only:\n  - master\n\n```\n\n\n_[How to create a CI/CD pipeline with Auto Deploy to Kubernetes using GitLab\nand\nHelm](http://blog.lwolf.org/post/how-to-create-ci-cd-pipeline-with-autodeploy-k8s-gitlab-helm/)\nwas originally published on Lwolfs Blog._\n\n\nPhoto by C Chapman on [Unsplash](https://unsplash.com/)\n",[721,9,766,1428],"user stories",{"slug":1430,"featured":6,"template":701},"how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab","content:en-us:blog:how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab.yml","How To Create A Ci Cd Pipeline With Auto Deploy To Kubernetes Using Gitlab","en-us/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab.yml","en-us/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab",{"_path":1436,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1437,"content":1442,"config":1449,"_id":1451,"_type":14,"title":1452,"_source":16,"_file":1453,"_stem":1454,"_extension":19},"/en-us/blog/how-to-keep-up-with-ci-cd-best-practices",{"title":1438,"description":1439,"ogTitle":1438,"ogDescription":1439,"noIndex":6,"ogImage":823,"ogUrl":1440,"ogSiteName":686,"ogType":687,"canonicalUrls":1440,"schema":1441},"How to keep up with CI/CD best practices","In this post, we look at continuous integration/continuous delivery (CI/CD), how to implement some best practices, and why it is important.","https://about.gitlab.com/blog/how-to-keep-up-with-ci-cd-best-practices","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to keep up with CI/CD best practices\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-02-03\",\n      
}",{"title":1438,"description":1439,"authors":1443,"heroImage":823,"date":1445,"body":1446,"category":1447,"tags":1448},[1444],"Valerie Silverthorne","2022-02-03","\nContinuous integration and continuous delivery (CI/CD) are at the heart of any successful DevOps practice. Teams wanting to achieve modern software development must keep up with [CI/CD](/topics/ci-cd/) best practices. Here’s what you need to know to make sure your team is on the right track.\n\n## What is the meaning of CI/CD?\n\nIt’s a tech process, it’s a mindset, it’s a series of steps… CI/CD is all of those things. Put simply, CI enables DevOps teams to streamline code development using automation. CI simplifies software builds and source code integration, enables version control, and promotes greater collaboration via automation. Where CI leaves off, continuous delivery kicks in with automated testing and deployment. Not only does CD reduce the amount of “hands on” time ops pros need to spend on delivery and deployment, it also enables teams to [drastically reduce the number of tools](/resources/whitepaper-forrester-manage-your-toolchain/) required to manage the lifecycle.\n\n## What are the best practices for CICD?\n\nIf you want to be successful with CI/CD, make continuous integration, delivery, and deployment your mantra as they are the cornerstones of software development practices. The goal of DevOps is to get software to users more quickly than traditional methods, and these development practices will help make that happen.\n\nIf you ask 10 DevOps teams for their take on CI/CD best practices, granted, you'll likely get 10 different answers. However, there are several tips that are widely agreed upon:\n\n1. Only build once: Don't create a new build for each stage because you risk introducing inconsistencies. Instead, promote the same build artifacts throughout each stage of the CI/CD pipeline. This requires an environment-agnostic build.\n\n2. 
Streamline the tests: Strike a balance between test coverage and performance. If it takes too long for test results users will try to circumvent the process.\n\n3. Fail fast: On the CI side, devs committing code need to know as quickly as possible if there are issues so they can roll the code back and fix it while it’s fresh in their minds. The idea of “fail fast” helps reduce developer context switching too, which makes for happier DevOps professionals.\n\n4. Make it daily: The more regular the code commits, the more benefit DevOps teams will see.\n\n5. Fix it if it’s broken: CI/CD makes it simple to fix broken builds.\n\n6. Clean pre-production environments:The longer environments are kept running, the harder it becomes to track all the configuration changes and updates that have been applied. This is good incentive to clean up pre-production environments between each deployment. \n\n7. Automation all the time: Keep tweaking the CI/CD pipeline to ensure the “continuous automation” state is achieved.\n\n8. Know the steps: Make sure the release and rollback plans are well documented and understood by the entire team.\n\n9. Keep it safe: CI/CD is a shift left, so it offers a good opportunity to integrate security earlier in the process.\n\n10. It’s a loop: Make sure there’s an easy way for the entire team to receive (and contribute to) feedback.\n\n## Continuous delivery best practices\n\nContinuous delivery/deployment feels like it deserves it’s own deep dive into best practices because CI often steals most of the headlines. Here is a roundup of CD best practices:\n\n- Start where you are: Don’t wait for a new platform. It’s always possible to tweak what you have to make it faster and more efficient.\n\n- Less is more: The best CD is done with minimal tools.\n\n- Track what’s happening: Issues and merge requests can get out of hand. If milestones are an option, they can help. 
Bonus: Milestones do double-duty when setting up Agile sprints and releases.\n\n- Automatically deploy changes: Streamline user acceptance testing and staging with automation.\n\n- Manage the release pipeline: Automation is the answer.\n\n- Establish monitoring: Keeping a good eye on the production process saves time and money. It also can provide key data points to the business side.\n\n- Kick off continuous deployment: Once continuous delivery is humming, bring on the hands-free deployment where it’s possible to send changes to production automatically. \n\n## How to improve the CI/CD pipeline\n\nA pipeline is just another way of characterizing the series of steps involved in deploying a new version of software. Monitoring and automation are concepts introduced in a CI/CD pipeline to improve the app development process, especially during the integration and testing phases, as well as when software is delivered and deployed.\n\nThe typical elements of a CI/CD pipeline are: plan, analyze, design, build, test, release, deploy, validation and compliance and maintenance. These steps can be done manually, but the real value of a CI/CD pipeline comes when they are automated.\n\nIf it’s time to finetune the CI/CD pipeline, consider the following performance enhancements:\n\n- Mix up the release strategy. A [canary release](https://martinfowler.com/bliki/CanaryRelease.html) (sometimes called a canary deployment) might be worth considering. In a canary release, new features are deployed to just a select group of users.\n\n- Add more automated testing because there is [never enough automated testing](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/). \n\n- Continue to pare down. Fewer tools mean fewer handoffs and steps. If CI/CD is part of a [DevOps platform](/topics/devops-platform/), everything will be in one place. 
\n\n- Consider a routine practice of [software composition analysis](https://www.csoonline.com/article/3640808/software-composition-analysis-explained-and-how-it-identifies-open-source-software-risks.html) to ensure the DevOps team is keeping track of critical open source software issues. \n\n## How to measure the success of CI/CD \n\nDevOps teams can’t know how well their CI/CD practices are going unless they measure them. [Metrics](https://about.gitlab.com/topics/ci-cd/continuous-integration-metrics/) play an important role in improving system performance and helping to identify where value can be added. They also provide a baseline for measuring the impact of any improvements made.\n\n Here are the best metrics to employ:\n\n### Cycle time\nThis refers to how long it takes to roll out a functional application from the time work on the code begins. To figure out the average life cycle time, measure the development process phases. This metric will provide insight into what the overall development time is and any bottlenecks in the process.\n\n### Time to value\nThis refers to how long it takes to release written code. The integration, testing, delivery, and deployment should take anywhere from minutes up to a few hours for test cycles to finish. If it takes days to move a build through the CI/CD pipeline time to value is not being realized and the process should be fine-tuned.\n\n### Uptime\nUptime is a measure of stability and reliability and whether everything is working as it should. It is one of the biggest priorities the ops team has. When the CI/CD strategy is automated, ops leaders can focus more of their time on system stability and less time on workflow issues.\n\n### Error rates\nApplication error rates is a fact of life in the development process. Tracking them is very important because not only can error rates indicate quality problems, but also ongoing performance and uptime related issues. 
\nIf uptime and error rates seem high, it can illustrate a [common CI/CD challenge](https://about.gitlab.com/blog/modernize-your-ci-cd/) between dev and ops teams. Operations goals are a key indicator of process success.\n\n### Infrastructure costs\nInfrastructure costs are critically important with cloud native development. Deploying and managing a CI/CD platform can result in big expenses if they are not kept in check.\nTo determine how they will set their prices, cloud providers will consider what the cost is of network hardware, infrastructure maintenance, and labor. \n\n### Team retention\nIt’s no mystery: When a developer – or anyone, really – feels valued and satisfied they’re apt to stick around. When teams work well together and know how to collaborate, retention is likely to follow. On the flip side, developers might feel uncomfortable speaking up if they don’t like how things are going, but looking at retention rates can help identify potential problems.\n\n##  What are the benefits of following CI/CD best practices?\n\nWhen best practices are followed, the [benefits of CI/CD](https://about.gitlab.com/topics/ci-cd/benefits-continuous-integration/) are felt throughout an organization: From HR to operations, teams work better and achieve goals. Establishing metrics around CI/CD performance can go beyond providing insights on development and carry over to many aspects of the business. \n\nA well-functioning CI/CD pipeline can be a game changer for DevOps teams. Here are some of the biggest benefits:\n\n**Developers aren’t fixing things, they’re writing code.** Fewer tools and toolchains mean less time spent on maintenance and more time spent actually producing high-quality software applications.\n\n**Code is in production.** Rather than sitting in a queue, code actually makes it out into the real world. 
This also leads to happier developers.\n\n**Developers have the bandwidth to focus on solving business problems.** A streamlined CI/CD process lets developers actually focus on what matters and not on the distractions of problem code, missed handoffs, production issues, and more.\n\n**It’s easier to innovate.** It’s a competitive world, and organizations need all the tools at their disposal to stay ahead. A well-built CI/CD process makes software development easier, faster and safer, which means DevOps teams have the time and energy to think outside the box.\n\n**Attract and retain talent.** It’s a very competitive labor market and DevOps talent can be very hard to impress. Nothing says “we take our DevOps team seriously” more than an organization that’s invested in the technology and processes around CI/CD.\n\n**Everyone does what they do best.** Dev, ops, sec and test each have a critical role to play, and CI/CD helps [clearly delineate the responsibilities](/topics/devops/build-a-devops-team/).\n\n## CI/CD deployment strategy\n\nRemember that CI/CD is about getting a software application into the hands of a customer that is better and done quicker than before. Organizations that adopt CI/CD find their productivity improves significantly. The trick is coming up with a deployment strategy that works for the individual organization. \n\nHere are some strategies to help make a deployment successful:\n\n- Commit to frequency in CD\n- Automate the build process\n- Run tests in parallel, and create a deployment pipeline\n- Fail fast and adopt a shift left mentality to give developers the skills and tools to accelerate without breaking things \n- Use CI tools that provide faster feedback\n\n## How can I implement CI/CD in my organization?\n\nBefore any software is implemented, it’s key to determine what the business drivers are and the same goes for adopting CI/CD. All development stakeholders should be involved early on in the implementation process. 
Developers should provide input since they will be the main users of a product. \n\nMake sure to do your due diligence when researching software that enables CI/CD, and ask about free trials. \n\nWhile it may seem counterintuitive since CI/CD is about accelerating the pace of software delivery in an automated fashion, start the process with a mentality of slow and steady. The boost in efficiency will decline if bugs are steadily moving into the finished application. \n\nIt’s important to have consistency in the integration process. Perform unit tests, trigger releases manually and track metrics. Then determine what can and should be automated.\n","insights",[721,9,765],{"slug":1450,"featured":6,"template":701},"how-to-keep-up-with-ci-cd-best-practices","content:en-us:blog:how-to-keep-up-with-ci-cd-best-practices.yml","How To Keep Up With Ci Cd Best Practices","en-us/blog/how-to-keep-up-with-ci-cd-best-practices.yml","en-us/blog/how-to-keep-up-with-ci-cd-best-practices",{"_path":1456,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1457,"content":1462,"config":1467,"_id":1469,"_type":14,"title":1470,"_source":16,"_file":1471,"_stem":1472,"_extension":19},"/en-us/blog/impact-of-the-file-type-variable-change-15-7",{"title":1458,"description":1459,"ogTitle":1458,"ogDescription":1459,"noIndex":6,"ogImage":908,"ogUrl":1460,"ogSiteName":686,"ogType":687,"canonicalUrls":1460,"schema":1461},"Understanding the file type variable expansion change in GitLab 15.7","Learn how the change to file type variable expansion can impact CI jobs that rely on the file contents and what to do.","https://about.gitlab.com/blog/impact-of-the-file-type-variable-change-15-7","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Understanding the file type variable expansion change in GitLab 15.7\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": 
\"2023-02-13\",\n      }",{"title":1458,"description":1459,"authors":1463,"heroImage":908,"date":1464,"body":1465,"category":808,"tags":1466},[913],"2023-02-13","In GitLab 15.7, we stopped expanding `file type` variables in CI jobs. CI\njobs that rely on the old expansion method will generate errors and not\nwork. Here is a look at how this change came about, the difference in job\noutputs, and what to do next.\n\n\n## Background\n\n\nGitLab CI has long-supported file type CI/CD variables. This is a helpful\nfeature for CI jobs, as a file variable is a simple way to pass values to an\nexternal system. In cases where there is a concern about environment\nvariable size limits, putting the information in a file and using an\nenvironment variable to reference the file is a good option.\n\n\nBefore 15.7, variable expansion expanded the contents of the file referenced\nin a file type variable. Some users found this expansion behavior to be\nquite valuable. In looking at some metrics on GitLab.com, for example, we\nsaw over 1,000 unique projects that used a file variable inside another\nvariable. However, other users did not find this unintended behavior helpful\nand implemented workarounds.\n\n\nAs expected, a file referenced in a file type variable may contain sensitive\ndata. So performing variable expansion on the file contents could expose\nthat data in the build environment. Even though the risk could be somewhat\nmitigated, continuing to expand file type variables was not the right\napproach to ensure the most secure system.\n\n\n## Example of the job output before and after 15.7\n\n\n1. Create a file variable via the GitLab UI. For example: `A_FILE_VAR` with\nthe value `this is some super secret content`.\n\n1. 
Create a CI job with this content:\n\n\n```\n\ntest_job:\n   stage: test\n   variables:\n     REF_FILE_VAR: $A_FILE_VAR\n   script:\n     - echo $A_FILE_VAR\n     - cat $A_FILE_VAR\n     - echo $REF_FILE_VAR\n     - cat $REF_FILE_VAR\n\n```\n\n\n**Results before 15.7:**\n\n\n```\n\n$ echo $A_FILE_VAR\n\n/builds/test-project-repo/test-project.tmp/A_FILE_VAR\n\n$ cat $A_FILE_VAR\n\nthis is some super secret content\n\n$ echo $REF_FILE_VAR\n\nthis is some super secret content\n\n$ cat $REF_FILE_VAR\n\ncat: can't open 'this': No such file or directory\n\ncat: can't open 'is': No such file or directory\n\ncat: can't open 'some': No such file or directory\n\ncat: can't open 'super': No such file or directory\n\ncat: can't open 'secret': No such file or directory\n\ncat: can't open 'content': No such file or directory\n\n\n```\n\n\n**Results after 15.7:**\n\n\n```\n\n$ echo $A_FILE_VAR\n\n/builds/test-project-repo/test-project.tmp/A_FILE_VAR\n\n$ cat $A_FILE_VAR\n\nthis is some super secret content\n\n$ echo $REF_FILE_VAR\n\n/builds/test-project-repo/test-project.tmp/A_FILE_VAR\n\n$ cat $REF_FILE_VAR\n\nthis is some super secret content\n\n\n```\n\n\nYou will notice in the 15.7+ job output the echo command no longer prints\nthe contents of the file.\n\n\n## What is the current status of the change?\n\n\nWe\n[deprecated](https://docs.gitlab.com/ee/update/deprecations.html#file-type-variable-expansion-in-gitlab-ciyml)\nthis feature in 15.5 and removed it from the code base in\n[15.7](https://gitlab.com/gitlab-org/gitlab/-/issues/29407). However, we\nneglected to include a follow-up removal notice in the 15.7 release, so some\nself-managed customers now upgrading to 15.7+ may have missed the initial\ndeprecation notice.\n\n\n## What do I need to do before upgrading to 15.7 or higher?\n\n\n1. Check your CI jobs for any instances where a file variable is referenced\ninside another variable.\n\n2. 
Change the references and test the CI jobs.\n\n\n## What’s next\n\n\nSecrets and variable handling are likely some of the most complex areas in a\nDevSecOps platform. On our end, we are continuously refining our processes\nto effectively communicate the potential impact of new features or the\nremoval of existing ones. We also recommend that you reach out to us (the\nVerify team) directly on issues referenced in a release or deprecation\nnotice if it's not clear how a change might affect your CI workflows.\n",[721,9,917],{"slug":1468,"featured":6,"template":701},"impact-of-the-file-type-variable-change-15-7","content:en-us:blog:impact-of-the-file-type-variable-change-15-7.yml","Impact Of The File Type Variable Change 15 7","en-us/blog/impact-of-the-file-type-variable-change-15-7.yml","en-us/blog/impact-of-the-file-type-variable-change-15-7",{"_path":1474,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1475,"content":1481,"config":1487,"_id":1489,"_type":14,"title":1490,"_source":16,"_file":1491,"_stem":1492,"_extension":19},"/en-us/blog/improve-cd-workflows-helm-chart-registry",{"title":1476,"description":1477,"ogTitle":1476,"ogDescription":1477,"noIndex":6,"ogImage":1478,"ogUrl":1479,"ogSiteName":686,"ogType":687,"canonicalUrls":1479,"schema":1480},"Get started with GitLab's Helm Package Registry","Improve CD workflows and speed up application deployment using our new Helm Package Registry.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668078/Blog/Hero%20Images/cover-image-helm-registry.jpg","https://about.gitlab.com/blog/improve-cd-workflows-helm-chart-registry","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get started with GitLab's Helm Package Registry\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Philip Welz\"}],\n        \"datePublished\": \"2021-10-18\",\n      
}",{"title":1476,"description":1477,"authors":1482,"heroImage":1478,"date":1484,"body":1485,"category":1103,"tags":1486},[1483],"Philip Welz","2021-10-18","In our 14.1 release, we offered the ability to add Helm charts to the GitLab\nPackage Registry. Here's everything you need to know to leverage application\ndeployment with these new features.\n\n\n## The role of container images\n\n\nThe de-facto standard is to package applications into [OCI\nImages](https://github.com/opencontainers/image-spec) which are often just\nreferred to as `container images` and more often as `Docker containers`. The\n[Open Container Initiative](https://opencontainers.org/) was launched in\n2015 by Docker and other companies to define industry standards around\ncontainer image formats and runtimes. GitLab introduced an OCI conform\n[Container Registry](/blog/gitlab-container-registry/) with the release of\n[GitLab 8.8](/releases/2016/05/22/gitlab-8-8-released/) in May 2016.\n\n\nToday, a common and widely adopted approach is to deploy applications with\n[Helm charts](https://helm.sh/) to [Kubernetes](https://kubernetes.io/).\nThis will be covered in this blog together with the feature release in\n[GitLab 14.1](/releases/2021/07/22/gitlab-14-1-released/) of adding Helm\nCharts to the [GitLab Package\nRegistry](https://docs.gitlab.com/ee/user/packages/package_registry/).\n\n\n### Install software to Kubernetes\n\n\nIn the DevOps era, [APIs](https://en.wikipedia.org/wiki/API) became\nincredibly popular, helping to drive demand for Kubernetes.\n\n\nThe core of Kubernetes' control plane is the API server. 
The API server\nexposes an HTTP REST API that lets end users, different parts of your\ncluster, and external components communicate with one another.\n\n\nTo interact with the API server we can use the command-line tool\n[kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) - although\nit would be also possible to use software development kits (SDKs) or any\nclient that understands REST like curl that was released 1997.\n\n\nBut which data format is best to use?\n\n\nModern APIs most likely use JSON. JSON is a human-readable format that\nprovides provide access to machine-readable data. Here is an example for\nKubernetes:\n\n\n```json\n\n{\n    \"kind\": \"Pod\",\n    \"apiVersion\": \"v1\",\n    \"metadata\": {\n        \"name\": \"nginx\",\n        \"creationTimestamp\": null,\n        \"labels\": {\n            \"run\": \"nginx\"\n        }\n    },\n    \"spec\": {\n        \"containers\": [\n            {\n                \"name\": \"nginx\",\n                \"image\": \"nginx\",\n                \"resources\": {}\n            }\n        ],\n        \"restartPolicy\": \"Always\",\n        \"dnsPolicy\": \"ClusterFirst\"\n    },\n    \"status\": {}\n}\n\n```\n\n\nOne downside of JSON is that comments are not supported. That is one several\nreasons why YAML stepped in and took the spot as the de-facto language to\nuse for declarative configurations. The Kubernetes API transforms YAML to\nJSON behind the scenes. As you can easily convert back and forth between\nboth, YAML tends to be more user-friendly. 
Nginx example Pod in YAML:\n\n\n```yaml\n\napiVersion: v1\n\nkind: Pod\n\nmetadata:\n  creationTimestamp: null\n  labels:\n    run: nginx\n  name: nginx\nspec:\n  Containers:\n  # NOTE: If no tag is specified latest will be used\n  - image: nginx\n    name: nginx\n    # TODO\n    resources: {}\n  dnsPolicy: ClusterFirst\n  restartPolicy: Always\nstatus: {}\n\n```\n\n\nNow you are ready to save our YAML code in a file called `nginx.yaml` and\ndeploy it into Kubernetes:\n\n\n```shell\n\n$ kubectl apply --filename=nginx.yaml \n\n```\n\n\n### Create a Helm chart\n\n\nApplying YAML configuration files can get overwhelming, especially when\nneeding to deploy into several environments or wanting to version the\nmanifests. It is also cumbersome to maintain plain YAML files for more\ncomplex deployments which can easily extend to more than 1000 lines per\nfile.\n\n\nInstead, how about using a format that packages our applications and makes\nthem easily reproducible with templates? How about adding our own versioning\nscheme to this packaged application? How about deploying the same version\nwith a few lines of code to multiple environments? This all comes with Helm.\n\n\nTo create a Helm package you have to ensure that the Helm CLI is\n[installed](https://helm.sh/docs/intro/install/) on your system (example\nwith Homebrew on macOS: `brew install helm`).\n\n\n```shell\n\n$ helm create nginx \n\n```\n\n\nInspect the created Helm boilerplate files with `ls -lR` or `tree` on the\nCLI. 
This Helm chart can also be tested in a sandbox environment to verify\nit is operational.\n\n\n```shell\n\n.\n\n├── Chart.yaml\n\n├── charts\n\n├── templates\n\n│   ├── NOTES.txt\n\n│   ├── _helpers.tpl\n\n│   ├── deployment.yaml\n\n│   ├── hpa.yaml\n\n│   ├── ingress.yaml\n\n│   ├── service.yaml\n\n│   ├── serviceaccount.yaml\n\n│   └── tests\n\n│       └── test-connection.yaml\n\n└── values.yaml\n\n```\n\n\nNOTE: You can read more about the starter Chart\n[here](https://helm.sh/docs/chart_template_guide/getting_started/).\n\n\nKindly Helm creates a starter chart directory along with the common files\nand directories used in a chart with NGINX as an example. We again can\ninstall this into our Kubernetes cluster:\n\n\n```shell\n\n$ helm install nginx .\n\n```\n\n\n### Package Distribution\n\n\nThus far, we have learned that applications are packaged in containers and\nare installed using a Helm chart. Both methods require central distribution\nstorage that is publicly accessible, or accessible in your local network\nenvironment where the Kubernetes clusters are running.\n\n\nThe Helm documentation provides insights on [running your own Helm\nregistry](https://helm.sh/docs/topics/registries/), similar to hosting your\nown Docker container registry.\n\n\nWhat if we could avoid Do It Yourself DevOps and have both containers and\nHelm charts in one central DevOps platform? After maturing the [container\nregistry in\nGitLab](https://docs.gitlab.com/ee/user/packages/container_registry/),\ncommunity contributors helped add the [Helm chart\nregistry](https://docs.gitlab.com/ee/user/packages/helm_repository/index.html)\nin 14.1.\n\n\nBuilding the container image and Helm chart is part of the CI/CD pipeline\nstages and jobs. 
The missing bit is the automated production deployment\nusing Helm charts in your Kubernetes cluster.\n\n\nAn additional benefit in CI/CD is reusing the authentication mechanism, and\nworking in the same trust environment with security jobs before actually\nuploading and publishing any containers and charts.\n\n\n### Build the Helm Chart\n\n\n```shell\n\n$ helm package nginx \n\n```\n\n\nThe command creates a new tar.gz archive ready to upload. Before doing so,\nyou can inspect the archive with the `tar` command to verify its content.\n\n\n```shell\n\n$ tar ztf nginx-0.1.0.tgz\n\n\nnginx/Chart.yaml\n\nnginx/values.yaml\n\nnginx/templates/NOTES.txt\n\nnginx/templates/_helpers.tpl\n\nnginx/templates/deployment.yaml\n\nnginx/templates/hpa.yaml\n\nnginx/templates/ingress.yaml\n\nnginx/templates/service.yaml \n\nnginx/templates/serviceaccount.yaml\n\nnginx/templates/tests/test-connection.yaml\n\nnginx/.helmignore\n\n```\n\n\n### Push the Helm chart to the registry\n\n\nWith the [helm-push](https://github.com/chartmuseum/helm-push/#readme)\nplugin for Helm we can now upload the chart to the GitLab Helm Package\nRegistry:\n\n\n```shell\n\n$ helm repo add --username \u003Cusername> --password \u003Cpersonal_access_token>\n\u003CREGISTRY_NAME>\nhttps://gitlab.com/api/v4/projects/\u003Cproject_id>/packages/helm/stable\n\n$ helm push nginx-0.1.0.tgz nginx\n\n```\n\n\nThis step should be automated for a production-ready deployment with a\nGitLab CI/CD job.\n\n\n```yaml\n\ndefault:\n  image: dtzar/helm-kubectl\n  before_script:\n    - 'helm repo add --username gitlab-ci-token --password ${CI_JOB_TOKEN} ${CI_PROJECT_NAME} ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/helm/stable'\nstages:\n  - upload\nupload:\n  stage: upload\n  script:\n    - 'helm plugin install https://github.com/chartmuseum/helm-push.git'\n    - 'helm push ./charts/podtatoserver-0.1.0.tgz ${CI_PROJECT_NAME}'\n```\n\n\n### Install the Helm chart\n\n\nFirst, add the Helm chart registry to your local CLI 
configuration and test\nthe manual installation.\n\n\n```shell\n\n$ helm repo add --username \u003Cusername> --password \u003Cpersonal_access_token>\n\u003CREGISTRY_NAME>\nhttps://gitlab.com/api/v4/projects/\u003Cproject_id>/packages/helm/stable\n\n$ helm install --name nginx \u003CREGISTRY_NAME>/nginx\n\n```\n\n\nOnce it works, you can continue with adding an automated installation job\ninto the CI/CD pipeline.\n\n\n```yaml\n\ndefault:\n  image: alpine/helm\n  before_script:\n    - 'helm repo add --username gitlab-ci-token --password ${CI_JOB_TOKEN} ${CI_PROJECT_NAME} ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/helm/stable'\nstages:\n  - install\nupload:\n  stage: install\n  script:\n    - 'helm repo update'\n    - 'helm install --name nginx ${CI_PROJECT_NAME}/nginx'\n```\n\n\n### Complete your DevOps lifecycle\n\n\nYou can learn more about the newest GitLab registries for Helm and Terraform\nin this [#EveryoneCanContribute cafe\nsession](https://everyonecancontribute.com/post/2021-07-28-cafe-40-terraform-helm-gitlab-registry/)\nand inspect the [deployment\nrepository](https://gitlab.com/everyonecancontribute/kubernetes/civo-k3s).\n\n\nTry the Helm chart registry and share your workflows. Are there any features\nmissing to complete your DevOps lifecycle? 
Let us know [on\nDiscord](https://discord.gg/qgQWhD6wWV).\n\n\nCover image by [Joseph Barrientos](https://unsplash.com/@jbcreate_) on\n[Unsplash](https://unsplash.com/photos/eUMEWE-7Ewg)\n\n{: .note}\n",[765,9,959],{"slug":1488,"featured":6,"template":701},"improve-cd-workflows-helm-chart-registry","content:en-us:blog:improve-cd-workflows-helm-chart-registry.yml","Improve Cd Workflows Helm Chart Registry","en-us/blog/improve-cd-workflows-helm-chart-registry.yml","en-us/blog/improve-cd-workflows-helm-chart-registry",{"_path":1494,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1495,"content":1501,"config":1507,"_id":1509,"_type":14,"title":1510,"_source":16,"_file":1511,"_stem":1512,"_extension":19},"/en-us/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands",{"title":1496,"description":1497,"ogTitle":1496,"ogDescription":1497,"noIndex":6,"ogImage":1498,"ogUrl":1499,"ogSiteName":686,"ogType":687,"canonicalUrls":1499,"schema":1500},"Inside the improved CI logs management experience for multi-line commands","Reviewing log output for CI/CD jobs with multi-line commands is now easier than ever. 
Find out why, how to configure your pipelines, and what's ahead.\n\n","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099499/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_639935439_3oqldo5Yt5wPonEJYZOLTM_1750099498739.jpg","https://about.gitlab.com/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside the improved CI logs management experience for multi-line commands\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Romuald Atchadé\"}],\n        \"datePublished\": \"2024-01-25\",\n      }",{"title":1496,"description":1497,"authors":1502,"heroImage":1498,"date":1504,"body":1505,"category":741,"tags":1506},[1503],"Romuald Atchadé","2024-01-25","Improving the GitLab CI/CD log experience for jobs with multi-line commands\nhas been a long-requested feature. With the latest release of GitLab and\nGitLab Runner, it's now easier to work with the log section for jobs with\nmulti-line commands. In this post, we will describe the experience with the\nnew feature, show you how to enable the new log output in your pipelines,\nand discuss key points regarding CI/CD script execution and log output in\nvarious shells, such as Bash and Powershell.\n\n\n## Overview of multi-line commands\n\n\nFirst, it’s helpful to describe what we mean by a CI job with multi-line\ncommands. In GitLab CI the script keyword is used to specify commands to\nexecute for a CI job. In the example below, the build-job has a single\ncommand, a basic echo statement, to execute in the script block. 
\n\n\n```\n\n## A pipeline with a single line command in the script block for the\nbuild-job\n\n\nbuild-job:\n  stage: build\n  script:\n    - echo \"this is the script to run for the build job\"\n\n```\n\n\nIf you were to run this pipeline, then the log output in the UI would\ndisplay as follows:\n\nLine 17 - GitLab CI automatically generates a log entry for the command that\nyou specify in the script block.\n\nLine 18 - This is the output of the command that was executed.\n\n\n![Ci log management - image\n2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099524/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099524655.png)\n\n\nNow as you can imagine, the script that you define in the script block will\nlikely be more complex than the example provided and could very well span\nmultiple lines in the CI/CD pipeline file. \n\n\n```\n\n## A pipeline with a multi-line command in the script block for the\nbuild-job\n\n\nbuild-job:\n  stage: build\n  script:\n       - |\n         echo \"this is a multi-line command\"  # a simple echo statement\n         ls  \n\n```\n\n\nIf you were to run this pipeline, then the log output in the UI would\ndisplay as follows:\n\n\nLine 17 - As in the previous example, GitLab CI automatically generates a\nlog entry for the command that you specify in the script block. 
You will\nnotice that line 17 only includes the first command in the script block.\nThis makes it more difficult to debug an issue with script execution as you\nwill need to refer back to the source pipeline file to see exactly what\nscript was executed.\n\n\n![CI log management - image\n3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099524656.png)\n\n\n## So what’s new?\n\n\nStarting in GitLab 16.7 and GitLab Runner 16.7, you can now enable a feature\nflag titled FF_SCRIPT_SECTIONS, which will add a collapsible output section\nto the CI job log for multi-line command script blocks. This feature flag\nchanges the log output for CI jobs that execute within the Bash shell.\n\n\n![CI log management - image\n4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099524658.png)\n\n\nLine 17: Unlike the previous examples, the first thing you will notice in\nthe screenshot above is that by default the log entry for the multi-line\ncommand is collapsed by default.\n\n\nSingle-line commands do not display in a collapsible element.\n\n\nFor multi-line scripts the multi-line command is now a collapsible element,\nso now, when you uncollapse the log entry for line 17, then the log will\ndisplay all of the commands that were executed in the script block.\n\n\n![CI log management - image\n1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099524659.png)\n\n\nThere is also the [`custom collapsible\nsection`](https://docs.gitlab.com/ee/ci/jobs/#custom-collapsible-sections)\nfeature, which in combination with this new multi-command output capability\ndoes provide you additional flexibility for displaying log output in the UI.\nHere is how you can use the two features to change the log output. 
\n\n\n```\n\n## A pipeline with a multi-line command in the script block for the\nbuild-job\n\n\nvariables:\n  FF_PRINT_POD_EVENTS: \"true\"\n  FF_USE_POWERSHELL_PATH_RESOLVER: \"true\"\n  FF_SCRIPT_SECTIONS: \"true\"\n\ncollapsible_job_multiple:\n  stage: build\n  script:\n    - |\n      echo \"{\n        'test': 'data',\n        'test2': 'data2',\n      }\"\n    - |\n      echo \"{\n        'test': 'data',\n        'test2': 'data2',\n      }\"\n    - echo -e \"\\033[0Ksection_start:`date +%s`:my_first_section\\r\\033[0KHeader of the 1st collapsible section\"\n    - echo 'this line should be hidden when collapsed'\n    - |\n      echo \"{\n        'test': 'data',\n        'test2': 'data2',\n      }\"\n    - echo -e \"\\033[0Ksection_start:`date +%s`:second_section\\r\\033[0KHeader of the 2nd collapsible section\"\n    - echo 'this line should be hidden when collapsed'\n    - echo -e \"\\033[0Ksection_end:`date +%s`:second_section\\r\\033[0K\"\n    - echo -e \"\\033[0Ksection_end:`date +%s`:my_first_section\\r\\033[0K\"\n\n```\n\n\nIf you were to run this pipeline with the FF_SCRIPT_SECTIONS feature flag\nset to false, then the log output would be as depicted in the following\nscreenshot.\n\n\n![CI log management - image\n5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099524/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099524661.png)\n\n\nBut, if you were to run this pipeline with the FF_SCRIPT_SECTIONS feature\nflag set to true, then the log output would be as depicted in the following\nscreenshot.\n\n\n![CI log management - image\n6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099524663.png)\n\n\n## What about other shells?\n\n\nAs of the 16.7 release, the collapsible output section in the CI job log for\nmulti-line command script blocks is only visible for CI/CD jobs that are\nexecuted with the Bash shell. 
CI/CD jobs executed with PowerShell are not\ncurrently supported. We plan to add this\n[capability](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4494)\nin a future release. \n\n\n## What are our future plans?\n\n\nA few features are still needed to improve the CI/CD job log output, and the\n`timestamp` for each log line is one of them. This addition will add missing\nfeatures such as command/section duration.\n\n\n> To learn more about GitLab CI/CD features, refer to the official [CI/CD\ndocumentation](https://docs.gitlab.com/ee/ci/index.html). \n\n\n_Disclaimer: This blog contains information related to upcoming products,\nfeatures, and functionality. It is important to note that the information in\nthis blog post is for informational purposes only. Please do not rely on\nthis information for purchasing or planning purposes. As with all projects,\nthe items mentioned in this blog and linked pages are subject to change or\ndelay. The development, release, and timing of any products, features, or\nfunctionality remain at the sole discretion of GitLab._\n",[721,9,109,766],{"slug":1508,"featured":91,"template":701},"inside-the-improved-ci-logs-management-experience-for-multi-line-commands","content:en-us:blog:inside-the-improved-ci-logs-management-experience-for-multi-line-commands.yml","Inside The Improved Ci Logs Management Experience For Multi Line 
Commands","en-us/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands.yml","en-us/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands",{"_path":1514,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1515,"content":1520,"config":1525,"_id":1527,"_type":14,"title":1528,"_source":16,"_file":1529,"_stem":1530,"_extension":19},"/en-us/blog/integrating-gitlab-com-with-atlassian-jira-cloud",{"title":1516,"description":1517,"ogTitle":1516,"ogDescription":1517,"noIndex":6,"ogImage":1258,"ogUrl":1518,"ogSiteName":686,"ogType":687,"canonicalUrls":1518,"schema":1519},"How to integrate GitLab.com with Jira Cloud","Check out how to use the GitLab App on the Atlassian Marketplace to connect your merge requests, branches, and commits to a Jira issue.","https://about.gitlab.com/blog/integrating-gitlab-com-with-atlassian-jira-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to integrate GitLab.com with Jira Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tye Davis\"}],\n        \"datePublished\": \"2021-03-25\",\n      }",{"title":1516,"description":1517,"authors":1521,"heroImage":1258,"date":1522,"body":1523,"category":741,"tags":1524},[1263],"2021-03-25","By moving to the cloud engineering teams can accelerate innovation and scale resources across an organization. The ease of access and reduced infrastructure costs that comes with moving to the cloud is a direct result of using a platform that easily integrates your data and keeps it secure yet accessible. Gitlab.com, the cloud (SAAS) platform for GitLab, modernizes data platforms to leverage new applications and advances end-to-end software delivery. GitLab partners with other best-in-class cloud companies so your teams can use tools that best align with your team's DevOps ecosystem. 
Application development requires speed and iteration, making seamless collaboration a necessity to deliver real business value. GitLab embraces connecting all phases of the software development lifecycle (SDLC) in a DevOps ecosystem that fuels visibility, collaboration, and velocity.\n\n## How to use GitLab with Atlassian's Jira\n\nWe know that many companies have been using Jira for project management, and have existing data and business processes built into their instance. For some of these customers, this means it can be difficult and cost-prohibitive to move off of Jira. We believe that people (and tools) work better when they're all in one place, so to serve these customers, we built a seamless integration between GitLab and Jira. By using the [GitLab for Jira app in the Atlassian Marketplace](https://marketplace.atlassian.com/apps/1221011/gitlab-com-for-jira-cloud), you can integrate GitLab.com and Jira Cloud harmoniously.\n\nHere's a short list of what you can do when integrating GitLab with Jira:\n\n* One GitLab project integrates with all the Jira projects in a single Jira instance.\n* Quickly navigate to Jira issues from GitLab.\n* Detect and link to Jira issues from GitLab commits and merge requests.\n* Log GitLab events in the associated Jira issue.\n* Automatically close (also called \"transition\") Jira issues with GitLab commits and merge requests.\n\n## How to configure the integration\n\nThere are two methods for configuring the integration. The [Jira DVCS connector](https://docs.gitlab.com/ee/integration/jira/dvcs/), and the method we describe in this blog post. The DVCS connector updates data only once per hour, while our method syncs data in real time. We recommend using our method for this reason, but if you are not using both of these environments then use the Jira DVCS connector instead.\n\n- First, go to Jira Settings > Apps > Find new apps, then search for GitLab.\n- Next, click GitLab for Jira, then click \"Get it now\". 
Or, go to the [App in the marketplace](https://marketplace.atlassian.com/apps/1221011/gitlab-for-jira), directly.\n\n![Arrow pointing to \"get it now button\" on GitLab on Atlassian Marketplace App](https://about.gitlab.com/images/blogimages/atlassianjira/gitlabonatlassianmarketplace.png){: .shadow.medium.center}\nClick the yellow button to download the app.\n{: .note.text-center}\n\n- Third, after installing, click \"Get started to go to the configurations\" page. This page is always available under Jira Settings > Apps > Manage apps.\n\n![GitLab on Atlassian Marketplace App](https://about.gitlab.com/images/blogimages/atlassianjira/manageappsjira.png){: .shadow.medium.center}\nClick the \"Get started button\".\n{: .note.text-center}\n\n- Fourth, in Namespace, enter the group or personal namespace, and then click \"Link namespace to Jira\". The user that is setting up GitLab for Jira must have Maintainer access to the GitLab namespace. Note: The GitLab user only needs access when adding a new namespace. For syncing with Jira, we do not depend on the user’s token.\n\n![GitLab for Jira Configuration](https://about.gitlab.com/images/blogimages/atlassianjira/gitlabforjiraintegration.png){: .shadow.medium.center}\nAdd a namespace.\n{: .note.text-center}\n\nAfter a namespace is added, all of the future commits, branches, and merge requests within all projects under that namespace will be synced to Jira. At the moment, past data cannot be synced.\n\nFor more information, see [the documentation](https://docs.gitlab.com/ee/integration/jira/index.html#usage).\n\n### How to troubleshoot GitLab for Jira\n\nThe GitLab for Jira App uses an iframe to add namespaces on the settings page. 
Some browsers block cross-site cookies which can lead to a message saying that the user needs to log on to GitLab.com even though the user is already logged in: \"You need to sign in or sign up before continuing.\"\n\nIn this situation, we recommend using [Firefox](https://www.mozilla.org/en-US/firefox/), [Google Chrome](https://www.google.com/chrome/index.html) or enabling cross-site cookies in your browser.\n\n### What are the limitations of GitLab for Jira?\n\nThis integration is currently not supported on GitLab instances under a [relative URL](https://docs.gitlab.com/omnibus/settings/configuration.html#configuring-a-relative-url-for-gitlab) (for example, http://yourcompanyname.com/gitlab).\n\n## How to use GitLab for Jira\n\nAfter integrating GitLab and Jira, you can:\n\n- Refer to any Jira issue by its ID in GitLab branch names, commit messages, and merge request titles.\n\n- Using commit messages in GitLab, you can move Jira issues along that Jira project’s defined transitions.\n\n![GitLab for Jira Setup](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot.png){: .shadow.medium.center}\nIn this image, you can see that this Jira issue has four stages: Backlog, selected for development, in progress, and done.\n{: .note.text-center}\n\n- As referenced in the base GitLab-Jira integration, when you reference an issue in a comment on a merge request and commit, e.g., PROJECT-7, the basic integration adds a comment in the Jira issue. Also, by commenting in a Jira transition (putting a # first), this will move a Jira issue to the desired transition. 
Below is an example using the built-in GitLab Web IDE (this can be done in your Web IDE of choice as well).\n\n![View of Jira Transitions](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot2.png){: .shadow.medium.center}\nThere are multiple Jira transition options.\n{: .note.text-center}\n\n- Now, the user can see linked branches, commits, and merge requests in Jira issues (merge requests are called \"pull requests\" in Jira issues).\nJira issue IDs must be formatted in UPPERCASE for the integration to work.\n\n![View branches, commits and merge requests in your jira issue](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot4.png){: .shadow.medium.center}\nView branches, commits, and merge requests in your Jira issue.\n{: .note.text-center}\n\n- Click the links to see your GitLab repository data.\n\n![Deep Dive into your GitLab commits](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot5.png){: .shadow.medium.center}\nHow to take a look at your GitLab commits.\n{: .note.text-center}\n\n![Deep Dive into your GitLab branches](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot6.png){: .shadow.medium.center}\nTake a deep Dive into your GitLab merge requests.\n{: .note.text-center}\n\nFor more information on using Jira Smart Commits to track time against an issue, specify an issue transition, or add a custom comment, see the Atlassian page using [Smart Commits](https://support.atlassian.com/jira-cloud-administration/docs/enable-smart-commits/).\n\n## Watch and learn\n\nMore of a video person? 
For a walkthrough of the integration with GitLab for Jira, watch and learn how to configure GitLab Jira Integration using Marketplace App.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/SwR-g1s1zTo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nGitLab helps teams ship software faster with technology integration options, such as the integration with Jira, that automate tasks, provide visibility into development progress and the greater end-to-end software lifecycle. We recognize that many companies use Jira for Agile project management and our seamless integration brings Jira together with GitLab.\n\nCover image by [Mikołaj Idziak](https://unsplash.com/@mikidz) on [Unsplash](https://unsplash.com/photos/nwjRmbXbLgw).\n{: .note.text-left}\n",[1001,765,9],{"slug":1526,"featured":6,"template":701},"integrating-gitlab-com-with-atlassian-jira-cloud","content:en-us:blog:integrating-gitlab-com-with-atlassian-jira-cloud.yml","Integrating Gitlab Com With Atlassian Jira Cloud","en-us/blog/integrating-gitlab-com-with-atlassian-jira-cloud.yml","en-us/blog/integrating-gitlab-com-with-atlassian-jira-cloud",{"_path":1532,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1533,"content":1539,"config":1544,"_id":1546,"_type":14,"title":1547,"_source":16,"_file":1548,"_stem":1549,"_extension":19},"/en-us/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation",{"title":1534,"description":1535,"ogTitle":1534,"ogDescription":1535,"noIndex":6,"ogImage":1536,"ogUrl":1537,"ogSiteName":686,"ogType":687,"canonicalUrls":1537,"schema":1538},"Introducing CI/CD Steps, a programming language for DevSecOps automation","Inside GitLab’s vision for CI/CD programmability and a look at how we simplified workflow 
automation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665151/Blog/Hero%20Images/blog-image-template-1800x945__27_.png","https://about.gitlab.com/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing CI/CD Steps, a programming language for DevSecOps automation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2024-08-06\",\n      }",{"title":1534,"description":1535,"authors":1540,"heroImage":1536,"date":1541,"body":1542,"category":1146,"tags":1543},[913],"2024-08-06","For years, the DevOps industry has tried to simplify how developers create automation scripts or workflows to automatically test a code change and to perform a task with the resulting artifact or binary. Today, we are introducing [CI/CD Steps](https://docs.gitlab.com/ee/ci/steps/), a programming language for DevSecOps automation in experiment phase, as a solution to this challenge. With CI/CD Steps, software development teams can easily create complex automation workflows within GitLab.\n\n## The path to CI/CD Steps\n\nEarly in the company's history, GitLab founders and engineers decided that there must be a tight integration between source code management, the place you store your code, and continuous integration, the automation workflows that test your code changes. And we've continued to evolve that integration, focusing on workflow automation tasks and differentiating from the approaches of CI engines across the industry, including Jenkins CI's domain-specific language, GitHub Actions, and many more. \n\nAnd, yes, I did mean to use the term workflow automation tasks rather than [CI and continuous deployment (CD)](https://about.gitlab.com/topics/ci-cd/). This is simply a result of the code that I have seen our customers develop. 
In a lot of cases, the platform engineering teams that support development teams using GitLab are writing complex automation scripts (workflows). So we need to embrace a more expansive construct beyond simply CI and CD. In fact, I have seen some developers rave about the flexibility of new CI/CD solutions that allow for modularity and conditionals in writing automation workflows.\n\nAt GitLab, our initial approach for CI authoring was based on YAML. We can endlessly debate the pros and cons of such a choice, but for me, as a [DevOps](https://about.gitlab.com/topics/devops/) practitioner coming from a large Fortune 50 company with a moshpit of Jenkins Groovy code and hundreds of permutations of scripts basically performing the same job, the GitLab CI authoring and execution approach was a breath of fresh air. \n\nThe first time I read a GitLab CI file – this was back in mid-2019 – my first thought was, \"No, it could not be that simple.\" A non-developer can easily grasp the intent of a basic GitLab CI pipeline without prior knowledge of all of the intricacies of the syntax of the execution model. In fact, I had just spent a year working on a team that spent several hours each day helping other development teams debug Jenkins pipelines written in Groovy and trying to figure out how to test, and in some cases build, large Java monoliths; in other cases, tons of microservices.\n\nWhile there are benefits to a GitLab CI YAML-based authoring and a bash script execution type approach, there are also limitations. Limitations that developers or platform engineers bump into as they integrate more complex workflows into their CI pipelines. These issues seem to be amplified at enterprise scale as platform teams are trying to simplify or standardize workflows across multiple development teams. 
In fact, one of the quotes from a recent customer survey states: “GitLab needs to embrace a post-YAML world for CI.”\n\nSo, over the past two years, our pipeline authoring team, led by Product Manager [Dov Hershkovitch](https://gitlab.com/dhershkovitch), has been working extensively on improving the pipeline authoring experience. They've also been improving the management experience of the building blocks for workflow automation – especially at scale. In fact, a part of this work, the [GitLab CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/), recently became generally available.\n\nThe logical next step was to build a new language for workflow automation.\n\n## Understanding CI/CD Steps\n\nGitLab CI/CD Steps is a concept incubated by our top-notch engineers. In [our documentation](https://docs.gitlab.com/ee/ci/steps/), we describe CI/CD Steps as reusable and composable pieces of a CI job that can be referenced in a GitLab CI pipeline configuration. But what does that really mean and what is the long-term value proposition?\n\nAs I was giving this some thought, a comment from one of our customers (paraphrased here) came to mind:\n\n“CI/CD Steps enables you to compose inputs and outputs for a CI/CD job. With CI/CD Steps, developers can define inputs and outputs and, therefore, use CI/CD Steps as a function as we do in any modern programming language. A key differentiator to a normal CI/CD component is that CI/CD Steps allows the use of the outputs of other steps without GitLab having to know certain values before running the pipeline. 
With CI/CD Steps, you could more easily auto-cancel redundant jobs when all jobs are running as part of the parent pipeline versus having to use child pipelines.”\n\nHaving CI/CD Steps alongside the current GitLab CI/CD execution mechanism and the [CI/CD component catalog](https://docs.gitlab.com/ee/ci/components/index.html) unlocks so many possibilities for creating and maintaining the most complex CI/CD workflows. \n\nA key feature is reusability. Now, I am not suggesting that once we release CI/CD Steps as generally available, you would immediately start refactoring your currently working CI/CD jobs to CI/CD Steps. Instead, you likely will find opportunities to introduce CI/CD Steps to optimize complex pipeline workflows, and, in doing so, you will begin to reuse a CI/CD Step that you author in multiple pipelines.\n\nCI/CD Steps is a marathon, not a sprint. When we release this in beta (currently targeted for late 2024) and start getting feedback from you, we will learn new information that will guide the evolution of this new CI programming language as well as the new Step Runner, which is designed specifically to run CI/CD Steps alongside the current CI/CD jobs.\n\nI'm sure there will be questions about our strategy: Why did we make certain syntax choices? Why didn't we use Starlark as the basis for this new approach? Why did we create something new that we all have to learn? My boilerplate response is: At GitLab we develop our software in the open. More importantly, as a customer, user, and community member, if you have an idea of how to make it better, we invite you to create a merge request so we can improve this feature together.\n\nWe are the only enterprise software platform where, as users and customers, **you** have a direct say in how the platform evolves and **you** can see the changes happening transparently and in real time. That’s the power of GitLab – we iterate and we collaborate. 
You have invested in a platform and community that is able to evolve with the ever-changing software industry.\n\n## Create your own CI/CD step\n\nTo get a deeper understanding of CI Steps and our direction, take a look at the detailed refactoring proof-of-concept writeup in [this issue](https://gitlab.com/gitlab-org/step-runner/-/issues/85). [Principal engineer Joe Burnett](https://gitlab.com/josephburnett) walks through in great detail the thought process for refactoring a CI/CD job used as part of our GitLab Runner automated test framework. There are also recommendations noted at the end that will inform the evolution of the CI Steps syntax.\n\nThen check out the [CI/CD Steps tutorial](https://docs.gitlab.com/ee/tutorials/setup_steps/) and try creating your own CI/CD step. We recently released the `run` keyword, so testing out a CI/CD step will be simpler than previous examples that required using environment variables. This feature set is experimental so please share your experiences on the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/460057). 
There also is a separate feedback issue if you are testing the [Run GitHub Actions with CI/CD Steps experimental feature](https://docs.gitlab.com/ee/ci/steps/#actions).\n\nWe look forward to working with you on this journey to continuously improve the GitLab CI/CD authoring experience.\n\n## Read more\n- [CI/CD Catalog goes GA](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/)\n- [FAQ: GitLab CI/CD Catalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/)\n- [What is CI/CD?](https://about.gitlab.com/topics/ci-cd/)\n- [The basics of CI](https://about.gitlab.com/blog/basics-of-gitlab-ci-updated/)\n",[496,109,9,721,917],{"slug":1545,"featured":91,"template":701},"introducing-ci-cd-steps-a-programming-language-for-devsecops-automation","content:en-us:blog:introducing-ci-cd-steps-a-programming-language-for-devsecops-automation.yml","Introducing Ci Cd Steps A Programming Language For Devsecops Automation","en-us/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation.yml","en-us/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation",{"_path":1551,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1552,"content":1558,"config":1564,"_id":1566,"_type":14,"title":1567,"_source":16,"_file":1568,"_stem":1569,"_extension":19},"/en-us/blog/keeping-your-development-dry",{"title":1553,"description":1554,"ogTitle":1553,"ogDescription":1554,"noIndex":6,"ogImage":1555,"ogUrl":1556,"ogSiteName":686,"ogType":687,"canonicalUrls":1556,"schema":1557},"DRY development: A cheatsheet on reusability throughout GitLab","How to follow the DevOps principle of 'don't repeat yourself' to optimize CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683555/Blog/Hero%20Images/drylights.jpg","https://about.gitlab.com/blog/keeping-your-development-dry","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DRY 
development: A cheatsheet on reusability throughout GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"},{\"@type\":\"Person\",\"name\":\"Joe Randazzo\"}],\n        \"datePublished\": \"2023-01-03\",\n      }",{"title":1553,"description":1554,"authors":1559,"heroImage":1555,"date":1561,"body":1562,"category":741,"tags":1563},[784,1560],"Joe Randazzo","2023-01-03","More than 20 years ago, the book [The Pragmatic\nProgrammer](https://pragprog.com/titles/tpp20/the-pragmatic-programmer-20th-anniversary-edition/)\nbrought attention to the DRY principle, or “Don’t Repeat Yourself.\" This\nprinciple is defined as every piece of knowledge must have a single,\nunambiguous, authoritative representation within a system.\n\n\nThe main problem to solve here is minimizing duplication. As a development\nproject is bombarded with new requests or changing requirements, DevOps\nteams must balance between development of net-new features or maintaining\nexisting code. The important part is how to reduce duplicate knowledge\nacross projects.\n\n\nThis tutorial explores the mechanisms throughout GitLab that leverage the\nDRY principle to cut down on code duplication and standardize on knowledge.\nTo see working examples of reusability in action, take a look at this\n[repository](https://gitlab.com/guided-explorations/gitlab-ci-yml-tips-tricks-and-hacks/dry-repository-a-cheatsheet).\n\n\n## Minimizing duplication in CI/CD\n\n\n### include\n\n[`include`](https://docs.gitlab.com/ee/ci/yaml/index.html#include) can be\nused to transform a single .gitlab-ci.yml file into multiple files to\nimprove readability and minimize duplication. 
For example, testing,\nsecurity, or deployment workflows can be broken out into separate templates.\nThis also allows\n[ownership](https://docs.gitlab.com/ee/user/project/codeowners/) of the\nfiles.\n\n\n\n```yaml\n\ninclude:\n  - template: CI/Build.gitlab-ci.yml\n  - template: CI/Test.gitlab-ci.yml\n  - template: CI/Security.gitlab-ci.yml\n  - template: CD/Deploy.gitlab-ci.yml\n\n```\n\n\n### YAML anchors\n\n[YAML\nanchors](https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#anchors)\ncan be used to reduce repeat syntax and extend blocks of CI workflow,\nincluding jobs, variables, and scripts.\n\n\n```yaml\n\n.test_template: &test_suite\n  image: ruby:2.6\n\nunit_test:\n  \u003C\u003C: *test_suite\n  script:\n    - echo \"Running a test here\"\n\nend_to_end_test:\n  \u003C\u003C: *test_suite\n  script:\n    - echo \"Running a test here\"\n\nsmoke_test:\n  \u003C\u003C: *test_suite\n  script:\n    - echo \"Running a test here\"\n```\n\n\n### extends\n\n[`extends`](https://docs.gitlab.com/ee/ci/yaml/index.html#extends) is\nsimilar to anchors with additional flexibility and readability. 
The major\ndifference is it can be used with `includes`.\n\n\n```yaml\n\n\n.prepare_deploy:\n  stage: deploy\n  script:\n    - echo \"I am preparing the deploy\"\n  only:\n    - main\n\ndeploy_to_dev:\n  extends: .prepare_deploy\n  script:\n    - echo \"Deploy to dev environment\"\n  environment: dev\n\ndeploy_to_production:\n  extends: .prepare_deploy\n  script:\n    - echo \"Deploy to production environment\"\n  when: manual\n  environment: production\n```\n\n\n### !reference\n\n[`!reference`](https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#reference-tags)\nenables the selection of keyword configuration from other job sections and\nreuse in the current session.\n\n\n```yaml\n\n.vars:\n  variables:\n    DEV_URL: \"http://dev-url.com\"\n    STAGING_URL: \"http://staging-url.com\"\n\n.setup_env:\n  script:\n    - echo \"Creating Environment\"\n\n.teardown_env:\n  after_script:\n    - echo \"Deleting Environment\"\n\nintegration_test:\n  variables: !reference [.vars, variables, DEV_URL]\n  script:\n    - !reference [.setup_env, script]\n    - echo \"Run Test\"\n  after_script:\n    - !reference [.teardown_env, after_script]\n\nperformance_test:\n  variables: !reference [.vars, variables]\n  script:\n    - !reference [.setup_env, script]\n    - echo \"Run Test\"\n  after_script:\n    - !reference [.teardown_env, after_script]\n```\n\n\n### Downstream pipelines\n\n[Downstream\npipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html)\nenable the breakout of microservices and their pipelines. 
A .gitlab-ci.yml\nfile can be used for each service, and when a file or directory is changed,\nonly that pipeline needs to be triggered improving the awareness and\nreadability of what’s deploying.\n\n\n```yaml\n\nui:\n  trigger:\n    include: ui/.gitlab-ci.yml\n    strategy: depend\n  rules:\n    - changes: [ui/*]\n\nbackend:\n  trigger:\n    include: backend/.gitlab-ci.yml\n    strategy: depend\n  rules:\n    - changes: [backend/*]\n```\n\n\n![Dynamic child\npipeline](https://about.gitlab.com/images/blogimages/2022-02-01-parent-child-vs-multi-project-pipelines/parent-child.png){:\n.shadow}\n\n\n### CI/CD variables\n\n[CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) can be scoped to\na specific level, including the project, group, instance level, or\n.gitlab-ci.yml level. The values can be stored and reused across a group for\nproject inheritance or overwritten at the project level.\n\n\n```yaml\n\nvariables:\n  PROJECT_LEVEL_VARIABLES: \"I am first in line in precedence\"\n  GROUP_LEVEL_VARIABLES: \"I am second in line\"\n  INSTANCE_LEVEL_VARIABLES: \"I am in third place\"\n  GITLAB_CI_YML_LEVEL_VARIABLES: \"I am last in line of precedence\"\n\n```\n\n\n## Creating consistent code reviews across multiple teams\n\n\n### Description templates\n\n[Description\ntemplates](https://docs.gitlab.com/ee/user/project/description_templates.html)\nenable teams to define a consistent workflow for issues or merge requests.\nFor example, the MR template can define a checklist for rolling out to a\nfeature to ensure it’s documented, quality tested, and reviewed by\nappropriate team members. 
Here are [MR\ntemplates](https://gitlab.com/gitlab-org/gitlab/-/tree/master/.gitlab/merge_request_templates)\nthat GitLab team members use daily.\n\n\n```md\n\n\u003C!-- These templates can be set at the instance or group level to share\namongst the organization:\nhttps://docs.gitlab.com/ee/user/project/description_templates.html#set-instance-level-description-templates\n-->\n\n\n## What does this MR do?\n\n\n\u003C!-- Briefly describe what this MR is about. -->\n\n\n## Related issues\n\n\n\u003C!-- Link related issues below. -->\n\n\n## Create a checklist for the author or reviewer\n\n- [ ] Optional. Consider taking this writing course before publishing a\nchange.\n\n- [ ] Follow the documentation process stated here.\n\n- [ ] Tag this user group if this applies.\n\n\n\n\u003C!-- Quick Actions - See\nhttps://docs.gitlab.com/ee/user/project/quick_actions.html#issues-merge-requests-and-epics\nfor a list of all the quick actions available. -->\n\n\n\u003C!-- Add a label to assign a specific workflow using scoped labels -->\n\n/label ~documentation ~\"type::maintenance\" ~\"docs::improvement\"\n~\"maintenance::refactor\"\n\n\n\u003C!-- Apply draft format automatically -->\n\n/draft\n\n\n\u003C!-- Assign myself or a usergroup -->\n\n/assign me\n\n```\n\n\n### Project templates\n\n[Project\ntemplates](https://docs.gitlab.com/ee/user/group/custom_project_templates.html)\ncan be used to define an initial project structure for when new services are\nbeing developed. This gives a consistent starting point for projects that\ncome equipped with the latest file configurations and defaults.\n\n\n### File templates\n\n[File\ntemplates](https://docs.gitlab.com/ee/administration/settings/instance_template_repository.html)\nare similar to project templates but are default files to choose from when\nadding a new file to your repository. 
The team then can quickly choose from\nfiles that have best practices baked in and organization defaults.\n\n\n## Defining a Pipeline Center of Excellence project for CI/CD workflows\n\n\nAs you 'productionize' your CI/CD workflows, it’s recommended to create a\n“Pipeline Center of Excellence” project that contains templates, containers,\nor other abstracted constructs that can be adopted throughout the\norganization. This project contains file or CI/CD templates that have the\nbest practices or well-formed workflows defined for development teams to\nquickly adopt (includes) without recreating the wheel. To explore this in\npractice, visit [Pipeline\nCOE](https://gitlab-org.gitlab.io/professional-services-automation/pipelinecoe/pipeline-templates/#/)\ndocumentation written by the GitLab Professional Services team.\n\n\nHave a reusable component to suggest or that we missed? Add a comment to\nthis blog post or suggest a change to this file!\n\n\n## Related posts\n\n- [How to keep up with CI/CD best\npractices](https://about.gitlab.com/blog/how-to-keep-up-with-ci-cd-best-practices/)\n\n- [How to become more productive with GitLab\nCI](https://about.gitlab.com/blog/how-to-become-more-productive-with-gitlab-ci/)\n\n- [A visual guide to GitLab CI/CD\ncaching](https://about.gitlab.com/blog/a-visual-guide-to-gitlab-ci-caching/)\n\n\nCover image by [Federico\nBeccari](https://unsplash.com/@federize?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non [Unsplash](https://www.unsplash.com).\n",[721,9,765,789],{"slug":1565,"featured":6,"template":701},"keeping-your-development-dry","content:en-us:blog:keeping-your-development-dry.yml","Keeping Your Development 
Dry","en-us/blog/keeping-your-development-dry.yml","en-us/blog/keeping-your-development-dry",{"_path":1571,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1572,"content":1578,"config":1584,"_id":1586,"_type":14,"title":1587,"_source":16,"_file":1588,"_stem":1589,"_extension":19},"/en-us/blog/manager-of-frances-fr-domain-selects-gitlab",{"title":1573,"description":1574,"ogTitle":1573,"ogDescription":1574,"noIndex":6,"ogImage":1575,"ogUrl":1576,"ogSiteName":686,"ogType":687,"canonicalUrls":1576,"schema":1577},"France's .fr domain manager selects GitLab for security","Afnic looks to The One DevOps Platform to modernize its software development with automation, security and compliance, and support for multi-cloud environments.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667869/Blog/Hero%20Images/afniclogo.png","https://about.gitlab.com/blog/manager-of-frances-fr-domain-selects-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Manager of France's .fr domain selects GitLab for its DevSecOps capabilities\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-05-19\",\n      }",{"title":1579,"description":1574,"authors":1580,"heroImage":1575,"date":1581,"body":1582,"category":1103,"tags":1583},"Manager of France's .fr domain selects GitLab for its DevSecOps capabilities",[1185],"2022-05-19","Association Française pour le Nommage Internet en Coopération ([Afnic](https://www.afnic.fr/en/)) is a longstanding nonprofit in France that manages .fr domain names. 
Chosen 20 years ago by the French State to operate the .fr country code top-level domain, Afnic’s motto is “reliability first.” Afnic uses GitLab, The One DevOps Platform, to help sustain that motto through modernization of its software development environment.\n\nAfnic’s mission as the French National Top Level Domain Registry is to bring together public authorities, Internet users, and domain name professionals to build a secure and stable Internet, open to innovation and in which the French Internet community plays a leading role. Outages of such a digital service could prevent the provisioning of other services that rely on it and could thus have an impact on key economic and societal activities.\n\nAfnic started using GitLab about four years ago to build and secure the brand-new version of its Shared Registry System (SRS). The SRS is a platform that manages the domain names from the subscription of a domain name to the publication in the DNS database and all the updates during its life, including contacts, server names, and DNSSEC keys, according to Richard Coffre, Afnic’s principal product manager.\n\nSince the project began, all the technologies have changed. Previously, Afnic’s team was mainly using Java and Perl and now they use [Kubernetes](/solutions/kubernetes/), Angular, the latest version of Java, and Docker, among others. Security is paramount, and the team is using private clouds. That means Afnic has its own data centers in France and in colocation facilities all over the world.\n\n## Modernizing software development with automation and integration\n\nAfnic selected GitLab to automate and integrate processes during the deployment process. Previously, the majority of things were done manually and now Afnic’s team wants to follow [DevSecOps philosophy and governance](/topics/devsecops/). 
They wanted one DevOps platform with state-of-the-art [CI/CD](/topics/ci-cd/) capabilities, the ability to quickly onboard new developers, and features to improve compliance and monitoring functionality.\n\nNow, Gitlab is one of the core components of Afnic’s systems.\n\nThe company’s use of GitLab expanded as they deployed new versions of Java and Docker and other technologies. “We wanted to take a big step to align our technology with the state of the market,” Coffre says, and after surveying the development team, the choice was GitLab.\n\nThe team is integrating GitLab with Jira, which is providing a lot of value, he adds.\n\nNow, in addition to developers, Afnic’s database administrators and network administrators use GitLab. The team is using Docker for images and Ansible. Jira is used for ticketing issues and is linked to GitLab and Confluence as a wiki to create the documentation.\n\n## What GitLab brings to the table\n\nThe goal for Afnic is to increase automation and to have everything in the same place and for anyone to be able to get at the proper version anytime. “That's the strength of GitLab,\" Coffre says. “That's also why we chose it because it's one of the leaders. Like many modern source code management systems, GitLab allows our developers to concurrently create source code. But it does it easily, giving us the possibility to do it safely, remembering our motto.\"\n\nPreviously Afnic used only open source tools that they had to customize, which Coffre says was not efficient on a daily basis. To manage source code properly, the team syncs it to GitLab. The strong focus on community contributions “is a guarantee that its features match the developers’ needs, especially regarding CI/CD,” he adds. \n\nWhen new developers join Afnic, it is very easy to onboard them to GitLab, he says. Another benefit is the cost savings because developers don’t lose source code. 
There is a time-saving metric, too, because if there is an issue in GitLab, it just requires someone to patch it. \n\nNow developers can focus on higher-value strategic tasks like security and vulnerability compliance, and not manual tests and delays, etc. That frees up developers to focus on their job managing DNS databases because the GitLab platform manages the software development lifecycle end-to-end. Coffre says, “GitLab will provide the foundational platform for all Afnic’s software products moving forward. We have experienced great benefits so far and we are excited to expand our use of this platform into the future”.",[765,787,721,9],{"slug":1585,"featured":6,"template":701},"manager-of-frances-fr-domain-selects-gitlab","content:en-us:blog:manager-of-frances-fr-domain-selects-gitlab.yml","Manager Of Frances Fr Domain Selects Gitlab","en-us/blog/manager-of-frances-fr-domain-selects-gitlab.yml","en-us/blog/manager-of-frances-fr-domain-selects-gitlab",{"_path":1591,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1592,"content":1598,"config":1603,"_id":1605,"_type":14,"title":1606,"_source":16,"_file":1607,"_stem":1608,"_extension":19},"/en-us/blog/merge-trains-explained",{"title":1593,"description":1594,"ogTitle":1593,"ogDescription":1594,"noIndex":6,"ogImage":1595,"ogUrl":1596,"ogSiteName":686,"ogType":687,"canonicalUrls":1596,"schema":1597},"How to use merge train pipelines with GitLab","Read here an introduction on what merge trains are, how to use them and how to incorporate them to your GitLab project.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667210/Blog/Hero%20Images/merge-train-explained-banner.jpg","https://about.gitlab.com/blog/merge-trains-explained","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use merge train pipelines with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        
\"datePublished\": \"2020-12-14\",\n      }",{"title":1593,"description":1594,"authors":1599,"heroImage":1595,"date":1600,"body":1601,"category":741,"tags":1602},[976],"2020-12-14","This blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-01-20.\n{: .alert .alert-info .note}\n\n[Merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) is a powerful GitLab feature that empowers users to harness the potential of [pipelines for merge results](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) to the fullest and also automatically merge a series of (queued) merge requests (MRs) without breaking the target branch. However, due to the structural complexity of the concept, users are often unable to use it effectively for their projects and play it safe by restricting their usage to MRs that pose minimum or no conflict with the target branch.\n\nAs a [senior product designer for Continuous Integration (CI)](/company/team/#veethikaa), I often deconstruct certain concepts and logic for features related to CI so that I have a strong foundation of understanding when making design proposals. Recently, I had a chance to hold a discussion around a very interesting feature - merge trains — with the team. This post unpacks the concept of merge trains by explaining the difference between merge trains, pipelines for MRs, and pipelines for merge results.\n\n## Pipelines for merge requests\n\nGenerally, when a new merge request is created, a pipeline runs to check if the new changes are eligible to be merged to the target branch. This is called the pipeline for merge requests (MRs). A good practice is to only keep the necessary jobs for validating the changes at this step, so the pipeline doesn’t take a long time to complete and CI minutes are not overused. 
GitLab allows users to [configure the pipeline for MRs](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) by adding `rules:if: $CI_MERGE_REQUEST_IID` to the jobs they wish to run for MRs.\n\n![Pipeline for merge request](https://about.gitlab.com/images/blogimages/merge-train-explained-pipeline-for-merge-requests.jpg)\n\n### Pipelines for merge results\n\nMerge request pipelines verify the branch in isolation. The target branch may change several times during the lifetime of the MR, and these changes are not taken into consideration. In the time during which the pipeline for the MR runs (and succeeds), if the target branch progresses in the background and a user merges the changes to the target branch, they might eventually end up with a broken target.\n\nWhen a [pipeline for merge results](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) runs, GitLab CI performs a _pretend_ merge against the updated target branch by creating a commit on an internal ref from the source branch, and then runs a pipeline against it. This pipeline validates the result prior to merging, therefore increasing the chances of keeping the target branch green.\n\n![Pipeline for merge results](https://about.gitlab.com/images/blogimages/merge-train-explained-pipeline-for-merge-results.jpg)\n\nWe should keep in mind that this pipeline does not run automatically with every update to the target branch. To learn more about this feature in detail and understand the process of enabling it in your GitLab instance, you can refer to the [official documentation on merge results](https://docs.gitlab.com/ee/ci/pipelines/merged_results_pipelines.html).\n\nHowever, if a long time has passed since the last successful pipeline ran, by the time the MR is ready to be merged, the target branch may have already changed and advanced. If we go ahead and merge your MR without re-running the pipeline for MRs, we could end up with a broken target branch. 
Merge trains can prevent this from happening.\n\n### About merge trains\n\nPipeline for merge results is an extremely useful feature in itself, but tracking the right slot to merge the feature branch into the target and remembering to run the pipeline manually before doing so is a lot to expect from a developer buried in tasks that involve deep logical thinking.\n\nTo tackle this complexity in workflow, GitLab introduced [the merge trains feature](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) in [GitLab Premium 12.0](/releases/2019/06/22/gitlab-12-0-released/#sequential-merge-trains). Merge trains allow users to capitalize on the capabilities of pipelines for merge results to automate the process of merging to the target branch with minimum chances of breaking it.\n\nWith merge trains enabled, a merge request can be added to the train, which takes care of it until merged.\nA merge train can be imagined as a queue of MRs that is automatically managed for you.\n\n#### How do merge trains work?\n\nWhen users queue up their MRs in a merge train, GitLab performs a pretend merge for each source branch on top of the previous branch in the queue, where the first branch on the train is merged against the target branch.\nBy creating a temporary commit for each of these merges, GitLab can run merged result pipelines.\nThe first MR in the queue, after having a successful pipeline run for MRs, gets merged to the target branch.\n\nEvery time a merge request is merged into the target branch, the pipelines for the newly added MRs in the train would run against the target branch and the newly added changes from the recently merged MR and changes that are from MRs already in the train.\n\n![Pipeline for merge results](https://about.gitlab.com/images/blogimages/merge-train-explained-working.gif)\n\nMerge trains carry an immense possibility for innovation with GitLab as a toolchain. 
But to be able to build upon the concept, it is imperative to have a holistic understanding of the same at the system level.\n\nHopefully, this post does the job of breaking down the concept into layman's terms, thereby opening doors for future collaboration within [stage groups](https://handbook.gitlab.com/handbook/product/categories/) at GitLab.\n\nHave suggestions around improving merge trains? please leave your thoughts on this [epic](https://gitlab.com/groups/gitlab-org/-/epics/5122).\n",[721,9,917,765,853],{"slug":1604,"featured":6,"template":701},"merge-trains-explained","content:en-us:blog:merge-trains-explained.yml","Merge Trains Explained","en-us/blog/merge-trains-explained.yml","en-us/blog/merge-trains-explained",{"_path":1610,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1611,"content":1617,"config":1623,"_id":1625,"_type":14,"title":1626,"_source":16,"_file":1627,"_stem":1628,"_extension":19},"/en-us/blog/migrate-from-jenkins-update",{"title":1612,"description":1613,"ogTitle":1612,"ogDescription":1613,"noIndex":6,"ogImage":1614,"ogUrl":1615,"ogSiteName":686,"ogType":687,"canonicalUrls":1615,"schema":1616},"How we're improving migrations from Jenkins to GitLab CI/CD","Learn more about our Jenkins Importer category and see what's in the works for easier Jenkins migrations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679556/Blog/Hero%20Images/insights.png","https://about.gitlab.com/blog/migrate-from-jenkins-update","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we're improving migrations from Jenkins to GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-12-08\",\n      }",{"title":1612,"description":1613,"authors":1618,"heroImage":1614,"date":1620,"body":1621,"category":1447,"tags":1622},[1619],"Chrissie Buchanan","2020-12-08","\nTeams that want to migrate from 
Jenkins to [GitLab CI/CD](/topics/ci-cd/) can run into roadblocks in the migration process. After all, going from a complicated plugin environment to GitLab CI/CD isn't exactly an apples to apples comparison. Teams that want to make the switch to GitLab will need help to ease the transition – so what are we doing to make that transition easier?\n\nWe created a Jenkins Importer category direction to bring together documentation and issues around improving the Jenkins migration process. We'll go over a few of the projects that are in progress and our vision for the future of Jenkins migrations.\n\n## What is the Jenkins Importer category?\n\nThe [Jenkins Importer](/direction/verify/jenkins_importer/) category is a collection of tools and documentation to help teams migrate from their Jenkins environment to GitLab CI/CD as easily as possible. Since we're a company that works [public by default](https://handbook.gitlab.com/handbook/values/#public-by-default), we use these category direction pages for information related to upcoming products, features, and functionality, not necessarily for purchasing or planning purposes.\n\nUltimately, our goal is to make at least 80% of the automated tasks easy. Having a Jenkins Importer category helps us organize issues and epics around helping unblock teams from migrating to GitLab CI/CD. This category is currently at a \"minimal\" [level of maturity](/direction/maturity/), meaning the features might be available in the product but are not necessarily in production yet.\n\nWith our work being public, you can see our progress and make contributions or comments on these issues.\n\n## Jenkins Importer: Top priority\n\nOur main epic is about [implementing a wrapper](https://gitlab.com/groups/gitlab-org/-/epics/2779) around the Jenkinsfile Runner. 
A wrapper is all about creating a minimum viable change (MVC) that will enable teams to run their Jenkins stack within GitLab while they complete their migration.\n\nConverting a complicated Jenkins enterprise environment into GitLab can be especially complex. For some Jenkins users, they may have thousands of pipelines that need to be converted. The idea of a wrapper came from [a comment](https://gitlab.com/groups/gitlab-org/-/epics/2735#note_295172334) on a different issue around improving our [Jenkins migration documentation](https://docs.gitlab.com/ee/ci/migration/jenkins.html). This process can be used to run Jenkins builds in GitLab CI while migrating Jenkinsfiles to the GitLab CI/CD syntax.\n\n## Migrating from Jenkins: Other works in progress\n\nAs we continue to receive feedback from the community and [conduct research](https://gitlab.com/gitlab-org/ux-research/-/issues/765) on use cases, those findings will impact the maturity of this category. While we're focusing on a wrapper because it will have the most initial value, we have other vision items for the Jenkins Importer category as well, which are summarized below.\n\n### Importer for declarative and imperative Jenkins configuration\n\nThis [first issue is a proposal to write a tool](https://gitlab.com/gitlab-org/gitlab/-/issues/208276) that can read the newer declarative or imperative syntax (as opposed to JenkinsFiles, a Groovy DSL) and convert it to a valid `.gitlab-ci.yml` file.\n\n### Importer for scripted Jenkins configuration\n\nThis [second issue is a proposal for a translator](https://gitlab.com/gitlab-org/gitlab/-/issues/208275) that can turn scripted Jenkinsfiles written in Groovy into a YAML syntax.\n\nAt GitLab, everyone can contribute. If this category interests you and you'd like to know how we're making migrations easier, feel free to comment on the public issues. 
If you're interested in helping GitLab test the Jenkins wrapper, join our [public testing issue](https://gitlab.com/gitlab-org/gitlab/-/issues/215675) for instructions and to provide your feedback.\n\nLearn more about the benefits of single application CI/CD and see how GitLab and Jenkins compare head-to-head.\n\nCover image by [Kenrick Mills](https://unsplash.com/@kenrickmills?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/migrate?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[721,9],{"slug":1624,"featured":6,"template":701},"migrate-from-jenkins-update","content:en-us:blog:migrate-from-jenkins-update.yml","Migrate From Jenkins Update","en-us/blog/migrate-from-jenkins-update.yml","en-us/blog/migrate-from-jenkins-update",{"_path":1630,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1631,"content":1637,"config":1643,"_id":1645,"_type":14,"title":1646,"_source":16,"_file":1647,"_stem":1648,"_extension":19},"/en-us/blog/new-default-container-image-gitlab-saas-linux-runnners",{"title":1632,"description":1633,"ogTitle":1632,"ogDescription":1633,"noIndex":6,"ogImage":1634,"ogUrl":1635,"ogSiteName":686,"ogType":687,"canonicalUrls":1635,"schema":1636},"Using Ruby 3.1 as default on GitLab SaaS Linux runners","Learn about the new image and how to ensure CI job compatibility.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670766/Blog/Hero%20Images/container-reg-cdn-blog.jpg","https://about.gitlab.com/blog/new-default-container-image-gitlab-saas-linux-runnners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Ruby 3.1 as the default container image on GitLab SaaS Runners on Linux\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2022-12-13\",\n      
}",{"title":1638,"description":1633,"authors":1639,"heroImage":1634,"date":1640,"body":1641,"category":741,"tags":1642},"How to use Ruby 3.1 as the default container image on GitLab SaaS Runners on Linux",[913],"2022-12-13","\nOn January 12, 2023, we will change the [default container](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html) image used on GitLab Saas Runners on Linux from Ruby 2.5, which is end of life, to Ruby 3.1.\n\nIf you have specified a container image in your CI/CD job, then there is no impact to you. In other words, your GitLab SaaS CI/CD job will only run in the default container if no image is set for the job in the `.gitlab-ci.yml` pipeline file.\n\nTo check, open the log view of a CI job and note the image used. For example, if you have not added an image to your CI job on GitLab SaaS, then the job log will have the following:\n\n```\nUsing Docker executor with image ruby:2.5 ...\n\n```\n\nIf you have not set a container image in your CI job, then after this change, the job will run in a Ruby 3.1 container.\n\n## How can I check for any build issues on Ruby 3.1?\n\nWhile it is not expected that running a CI/CD job on Ruby 2.5 is incompatible with Ruby 3.1, to check, simply configure the job to run in a Ruby 3.1 container. To do so, edit the `.gitlab-ci.yml` and add the following:\n\n```\ndefault:\n  image: ruby:3.1\n```\n\n## Future plans\n\nIn addition to this change, we plan to [define](https://gitlab.com/gitlab-org/gitlab/-/issues/384992) a new container image maintenance process for GitLab SaaS Runners on Linux. The new policy aims to ensure that the default image used is updated so that it contains the latest security fixes.\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. 
As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n\n",[765,853,721,9,233],{"slug":1644,"featured":6,"template":701},"new-default-container-image-gitlab-saas-linux-runnners","content:en-us:blog:new-default-container-image-gitlab-saas-linux-runnners.yml","New Default Container Image Gitlab Saas Linux Runnners","en-us/blog/new-default-container-image-gitlab-saas-linux-runnners.yml","en-us/blog/new-default-container-image-gitlab-saas-linux-runnners",{"_path":1650,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1651,"content":1657,"config":1662,"_id":1664,"_type":14,"title":1665,"_source":16,"_file":1666,"_stem":1667,"_extension":19},"/en-us/blog/new-machine-types-for-gitlab-saas-runners",{"title":1652,"description":1653,"ogTitle":1652,"ogDescription":1653,"noIndex":6,"ogImage":1654,"ogUrl":1655,"ogSiteName":686,"ogType":687,"canonicalUrls":1655,"schema":1656},"GitLab introduces new machine types for GitLab SaaS Linux Runners","GitLab SaaS now offers larger machine types for running CI jobs on Linux.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672836/Blog/Hero%20Images/multiple-machine-types-cover.png","https://about.gitlab.com/blog/new-machine-types-for-gitlab-saas-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab introduces new machine types for GitLab SaaS Linux Runners\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2022-09-22\",\n      }",{"title":1652,"description":1653,"authors":1658,"heroImage":1654,"date":1659,"body":1660,"category":1146,"tags":1661},[913],"2022-09-22","Our GitLab SaaS vision is to provide a solution where you can easily choose\nand use the correct type of public cloud-hosted 
compute resources for your\nCI/CD jobs. In this first iteration towards achieving that vision, we are\npleased to announce that two larger compute machines are generally available\nfor GitLab SaaS Runners on Linux.\n\n\nWith these two machine types, you can now access more choices for your\nGitLab SaaS CI/CD jobs. And with 100% job isolation on an ephemeral virtual\nmachine, and security and autoscaling fully managed by GitLab, you can\nconfidently run your critical [CI/CD](/topics/ci-cd/) jobs on GitLab SaaS.\n\n\n## New machine type details\n\n\nThe new [SaaS Runners on\nLinux](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html)\nare a 2 vCPU, 8GB RAM (`saas-linux-medium-amd64`), and a 4 vCPU, 16GB RAM\n(`saas-linux-large-amd64`) machine type. These machine types, powered by the\nlatest generation of Google Compute N2D virtual machines, deliver\nsignificant performance improvements for general-purpose CI workloads. The\nmedium machine type, `saas-linux-medium-amd64`,  is available to all\nsubscriptions (Free, Premium, Ultimate). The large machine type,\n`saas-linux-large-amd64` is only available to paid plans (Premium and\nUltimate) and GitLab for Open Source program members.\n\n\nNote: If you are in a Free plan and tag a CI job with the large machine\ntype, `saas-linux-large-amd64`, you will get an error at the job level and\nthe job will not run.\n\n\n```\n\nThis job is stuck because of one of the following problems. There are no\nactive runners online, no runners for the protected branch, or no runners\nthat match all of the job's tags: saas-linux-large-amd64\n\n\n```\n\n\n## Are the new machine types right for my CI job?\n\n\nThe answer is that it depends. If the CI job is compute-intensive, you will\nlikely see a performance improvement measured by reduced build times. 
We ran\na series of  [Linux\nkernel](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-stress/linux-kernel)\nbuilds on the medium machine type to test the potential performance gains\nfor compute-intensive CI jobs.\n\n\n![Linux kernel build CI job execution time\nbenchmark](https://about.gitlab.com/images/blogimages/new-machine-types-gitlab-saas-linux/linux-kernel-build-runner-saas-benchmark_2022-09-22.png)\n\n\nOur testing found an average 41% reduction in CI job execution time for the\nmedium machine types compared to the baseline small machine type. We\nrecommend you experiment with the new machine types for your CI jobs to\ndetermine the right choice based on your build workflows.\n\n\n## Getting started\n\n\nTo get started with the new machine types, simply add a tag to your CI file.\nWithout the tag, a job in your pipeline will automatically run on the small\nmachine type.\n\n\n### Example pipeline configuration\n\n\nIn this example pipeline configuration, `job_001` will run on the default\nLinux SaaS Runner as no machine type tag is defined. The subsequent job,\n`job_002`, in the build stage will run on the medium machine type, and\n`job_003` will run on the large machine type. So you have flexibility within\na GitLab CI/CD pipeline to choose the right machine type for each job.\n\n\n```\n\nstages:\n  - Prebuild\n  - Build\n  - Unit Test\n\njob_001:\n stage: Prebuild\n script:\n  - echo \"this job runs on the default (small) machine type\"\n\njob_002:\n tags: [ saas-linux-medium-amd64 ]\n stage: Build\n script:\n  - echo \"this job runs on the medium machine type\"\n\njob_003:\n tags: [ saas-linux-large-amd64 ]\n stage: Unit Test\n script:\n  - echo \"this job runs on the large machine type\"\n\n```\n\n\n## Understanding the new machine types and cost factors\n\n\nYou can start using the new machine types now with the CI minutes currently\navailable in your plan. 
The new machine types will consume your CI minutes\nat a different rate than the default (small) machine type based on an\napplied cost factor. If you are a GitLab for Open Source program member,\nthen refer to the [cost factor documentation\npage](https://docs.gitlab.com/ee/ci/pipelines/cicd_minutes.html#cost-factor)\nfor details on how cost factors are applied to your CI/CD jobs.\n\n\n|  | saas-linux-small-amd64 |saas-linux-medium-amd64 |saas-linux-large-amd64\n|\n\n| ------ | ------ |------ |------ |\n\n| CI minutes consumed per 1 minute of build time| 1 |2|3|\n\n\nToday your CI minutes usage report on GitLab SaaS will be an aggregate of\nall of the CI minutes consumed across all the machine types you select in\nyour jobs. In this\n[issue](https://gitlab.com/gitlab-org/gitlab/-/issues/356076), we are\nworking towards adding visibility into usage by each Runner type. So you\nwill soon have more granular reporting of use across the various Runner\nclasses (Linux, Windows, macOS) and machine types we plan to offer.\n\n\n## Feedback\n\n\nAt GitLab, we value your input and use it as a critical sensing mechanism in\nplanning roadmap investments. 
To provide feedback on the machine types you\nneed on GitLab SaaS Runners on Linux, add a comment to the respective\ncomment thread in this\n[issue](https://gitlab.com/gitlab-org/gitlab/-/issues/373196)\n\n\nCover image by [Julian Hochgesang](https://unsplash.com/@julianhochgesang)\non [Unsplash](https://unsplash.com)\n\n{: .note}\n",[721,9,743,1146],{"slug":1663,"featured":6,"template":701},"new-machine-types-for-gitlab-saas-runners","content:en-us:blog:new-machine-types-for-gitlab-saas-runners.yml","New Machine Types For Gitlab Saas Runners","en-us/blog/new-machine-types-for-gitlab-saas-runners.yml","en-us/blog/new-machine-types-for-gitlab-saas-runners",{"_path":1669,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1670,"content":1676,"config":1681,"_id":1683,"_type":14,"title":1684,"_source":16,"_file":1685,"_stem":1686,"_extension":19},"/en-us/blog/oidc",{"title":1671,"description":1672,"ogTitle":1671,"ogDescription":1672,"noIndex":6,"ogImage":1673,"ogUrl":1674,"ogSiteName":686,"ogType":687,"canonicalUrls":1674,"schema":1675},"Secure GitLab CI/CD workflows using OIDC JWT on a DevSecOps platform","Learn a new method to authenticate using JWT to increase the security of CI/CD workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667094/Blog/Hero%20Images/container-security.jpg","https://about.gitlab.com/blog/oidc","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Secure GitLab CI/CD workflows using OIDC JWT on a DevSecOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2023-02-28\",\n      }",{"title":1671,"description":1672,"authors":1677,"heroImage":1673,"date":1678,"body":1679,"category":1103,"tags":1680},[1324],"2023-02-28","\n\nSecuring CI/CD workflows can be challenging. 
This blog post walks you through the problem validation, explores the JWT token technology and how it can be used with OIDC authentication, and discusses implementation challenges with authorization realms. You will learn about the current possibilities and future plans with GitLab 16.0. \n\n### Variables vs. secrets\nVariables are an efficient way to control and inject parameters into your jobs and pipelines, making managing and configuring the CI/CD workflows easier. You can read more about [how to use CI/CD variables](https://about.gitlab.com/blog/demystifying-ci-cd-variables/). An extra layer of security on top of variables to mask and protect, for now, is our “best-effort” to prevent sensitive variables from being accidentally revealed. However, variables are not a drop-in replacement for secrets. [Securing secrets natively](https://gitlab.com/gitlab-org/gitlab/-/issues/217355) is a solution that GitLab aspires to provide. Meanwhile, we recommend storing sensitive information in a dedicated secrets management solution. As a company, we will provide you abilities to integrate and retrieve secrets as part of your CI/CD workflows. \n\n## Security shifting left\nSensitive information like passwords, secret tokens, or shared IDs required to access tools and platforms need to be securely stored. They must also be highly available to their owners and the teams who use them. There are various secrets management solutions and frameworks available. They have addressed one problem but created new problems. For example: \"Which tool is right for our needs?\" More importantly, in software development: \"What's the best way to integrate this into our DevOps processes so that we're secure but still operating as efficiently as possible?\" Ignoring the security protocols in your organization is not an option. However, sensitive information should be stored as securely as possible. 
Something as simple as an access token stored in plain text can lead to security leaks and business incidents in the worst-case scenarios.\n\n## Initial support for JWT\nThe [JSON Web Token (JWT)](https://en.wikipedia.org/wiki/JSON_Web_Token) aims to build the integration bridge as an open standard for security claims exchange. It is a signed, short-lived, contextualized token that allows everyone to implement authentication between different products securely. The JWT consists of three parts: a header, a payload, and a signature.\n\n- The header represents the type of the token and the encryption algorithm.\n- The signature ensures that the token hasn't been altered.\n- The payload comprises a series of claims representing the information exchanged between two parties, which includes information about a GitLab user (ID, email, login) and the pipeline information (pipeline ID, job ID, environment, and more).\n\n_Example of GitLab JWT payload_\n\n```\n{\n  \"jti\": \"c82eeb0c-5c6f-4a33-abf5-4c474b92b558\",\n  \"iss\": \"gitlab.example.com\",\n  \"iat\": 1585710286,\n  \"nbf\": 1585798372,\n  \"exp\": 1585713886,\n  \"sub\": \"job_1212\",\n  \"namespace_id\": \"1\",\n  \"namespace_path\": \"mygroup\",\n  \"project_id\": \"22\",\n  \"project_path\": \"mygroup/myproject\",\n  \"user_id\": \"42\",\n  \"user_login\": \"myuser\",\n  \"user_email\": \"myuser@example.com\",\n  \"pipeline_id\": \"1212\",\n  \"pipeline_source\": \"web\",\n  \"job_id\": \"1212\",\n  \"ref\": \"auto-deploy-2020-04-01\",\n  \"ref_type\": \"branch\",\n  \"ref_protected\": \"true\",\n  \"environment\": \"production\",\n  \"environment_protected\": \"true\"\n}\n```\nUsing this information (called \"claims\"), you can implement an authentication condition where the token will get rejected if one of those claims does not match. 
You can use this to restrict access to only the authorized users and jobs in your pipelines.\n\nGitLab 12.10 added [initial support for JWT token-based connections](https://about.gitlab.com/releases/2020/04/22/gitlab-12-10-released/#retrieve-cicd-secrets-from-hashicorp-vault), which was later [enhanced](https://about.gitlab.com/releases/2020/09/22/gitlab-13-4-released/#use-hashicorp-vault-secrets-in-ci-jobs) with the `secrets:` keyword, as well as the `CI_JOB_JWT` predefined CI/CD variable, which is automatically injected into every job in a pipeline. This implementation was restricted to Hashicorp Vault, and users can use it to read secrets directly from the vault as part of their CI/CD workflow.\n \n### OIDC (JWT Version 2)\nThe logic we used to build the initial support for JWT opened up the possibility of connecting to other providers as well, but the first iteration was still restricted to Hashicorp Vault users.\n\nThis problem was addressed in GitLab 14.7 when we [released](https://about.gitlab.com/releases/2022/01/22/gitlab-14-7-released/#openid-connect-support-for-gitlab-cicd) the first \"Alpha\" version of JWT V2, which provided [Open ID Connect (OIDC)](https://openid.net/connect/) support for CI/CD.\n\nOIDC is an identity layer implemented on top of the JSON web token. You can securely authenticate against many products and services that implement OIDC, including AWS, GCP, and many more, making better use of the token's potential. Similar to our first JWT iteration, we added another [predefined CI/CD variable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) `CI_JOB_JWT_V2` which is also automatically injected into every job in a CI/CD pipeline.\n\n### Securely store your secrets \nYour software supply chain should include everything needed to deliver and run your software. Securing your supply chain means you need to secure your software and the surrounding (cloud-native) infrastructure. 
In [GitLab 15.9](https://about.gitlab.com/releases/2023/02/22/gitlab-15-9-released/), we've added additional layers of protection to move our OIDC token from an Experiment to General Availability, increasing the security of your CI/CD workflows. \n\n\n#### Opt-in JWT token\nJSON web tokens (V1 and V2) are stored in CI/CD variables, which are injected automatically into all jobs in a CI/CD pipeline. However, it is likely most jobs in your pipeline do not need the token. In addition to the inefficiency of injecting unused tokens into all jobs in a pipeline, there is a potential security vulnerability. All it takes is one compromised job for this token to be leaked and used by an attacker to retrieve sensitive information from your organization. To minimize this risk, we've added the ability to restrict the token variable from all jobs in your pipeline and expose it only to the specific jobs that need it.\n\nTo declare the JSON web token in a job that needs it, configure the job in the `.gitlab-ci.yml` configuration file following this example:\n\n```yaml\njob_name:\n  id_token:\n    MY_JOB_JWT: # or any other variable name\n  ...\n```\n\nYou can minimize the token exposure across your pipeline, but ensure it is available to the jobs that require it.\n\n#### Audience claim (`aud:`)\nClaims constitute the payload part of a JSON web token and represent a set of information exchanged between two parties. The JWT standard distinguishes between reserved, public, and private claims.\n\nThe audience (`aud:`) claim is a reserved claim, which identifies the audience that the JWT is intended for (the target of the token). In other words, which services, APIs, or products should accept this token. 
If the audience claim does not match, the token is rejected, so the audience claim is an essential part of software supply chain security.\n\nThe option to configure the audience claim is done in the CI/CD configuration when [declaring the usage of the JWT token](https://docs.gitlab.com/ee/ci/secrets/id_token_authentication.html#id-tokens), if we'll continue from the previous example:\n\n```yaml\njob_name:\n  id_token:\n    MY_JOB_JWT: # or any other variable name\n        aud: \"...\" # mandatory field\n  script:\n    - my-authentication-script.sh MY_JOB_JWT….. # use the declared variables in a script\n  ```\n\nConfiguring the audience claim is mandatory for Vault users that leverage the [GitLab/Vault native integration](https://docs.gitlab.com/ee/ci/secrets/#use-vault-secrets-in-a-ci-job) (using the 'secrets:' keyword).\n\n```yaml\njob_name:\n  secrets:\n    VAULT_JWT_1: # or any other variable name\n      id_token:\n        aud: 'devs' # audience claim configuration\n    STAGING_DATABASE_PASSWORD: # VAULT_JWT_1 is the token to be used\n      vault: staging/db/password@ops\n```\n\n### Breaking changes and backward compatibility \nWe understand the increasing demand to secure your software supply chain. We recognize that many of our current users already use the JWT in what will soon be the \"old JWT method\" (V1). To mitigate this conflict, we've decided that moving to the new (OIDC) JWT method is optional until the next major release (GitLab 16.0). To use the new (OIDC) token, users must opt-in to this change from the UI settings and update the pipeline configuration, as explained in the previous sections. Users can continue using the Experiment or the \"old method\" until GitLab 16.0. 
(At that point, only the \"new\" (OIDC) JWT token and method will be available.)\n\nSeveral breaking changes were announced for both [Vault users](https://docs.gitlab.com/ee/update/deprecations.html#hashicorp-vault-integration-will-no-longer-use-ci_job_jwt-by-default) and [users of the JWT \"old\" methods](https://docs.gitlab.com/ee/update/deprecations.html#old-versions-of-json-web-tokens-are-deprecated). Those changes are scheduled for GitLab 16.0.\n\n## Three ways to use the JWT token\nThere are three ways to use a JWT to authenticate against different products in your CI/CD pipeline:\n- The \"old\" method, using the `secrets:` keyword and the `CI_JOB_JWT` variable, which is mainly used to integrate with Hashicorp Vault.\n- An \"Alpha\" version that uses the `CI_JOB_JWT_V2` OIDC token to integrate with different cloud providers.\n- A production-ready OIDC token, which is a secured version of the `CI_JOB_JWT_V2` token, used to authenticate with a variety of different products, like Vault, GCP, AWS, and so on.\n\nAll three methods are available until the next major version (GitLab 16.0). At that point, only the secured OIDC token will be available.\n\nTo prepare for this change, you should:\n\n1. Configure your pipelines to use the fully configurable and more secure [id_token](https://docs.gitlab.com/ee/ci/yaml/index.html#id_tokens) keyword.\n2. Enable the [Limit JSON Web Token (JWT) access setting](https://docs.gitlab.com/ee/ci/secrets/id_token_authentication.html#enable-automatic-id-token-authentication), which prevents the old tokens from being exposed to any jobs. (This setting will be permanently enabled for all projects in GitLab 16.0).\n3. 
If you use GitLab/Hashicorp native integration (using the [secrets:vault](https://docs.gitlab.com/ee/ci/yaml/#secretsvault) keyword), ensure the bound audience is prefixed with `https://`.\n\nThis should ensure a smooth transition to [GitLab 16.0](/upcoming-releases/) without breaking your existing workflows.\n\n\n",[766,789,721,9],{"slug":1682,"featured":6,"template":701},"oidc","content:en-us:blog:oidc.yml","Oidc","en-us/blog/oidc.yml","en-us/blog/oidc",{"_path":1688,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1689,"content":1695,"config":1702,"_id":1704,"_type":14,"title":1705,"_source":16,"_file":1706,"_stem":1707,"_extension":19},"/en-us/blog/parent-child-vs-multi-project-pipelines",{"title":1690,"description":1691,"ogTitle":1690,"ogDescription":1691,"noIndex":6,"ogImage":1692,"ogUrl":1693,"ogSiteName":686,"ogType":687,"canonicalUrls":1693,"schema":1694},"CI/CD patterns with parent-child and multi-project pipelines","Parent-child pipelines inherit a lot of the design from multi-project pipelines, but they also have differences that make them unique.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659961/Blog/Hero%20Images/parent-child-multi-project-pipelines-unsplash.jpg","https://about.gitlab.com/blog/parent-child-vs-multi-project-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Breaking down CI/CD complexity with parent-child and multi-project pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fabio Pitino\"}],\n        \"datePublished\": \"2022-02-22\",\n      }",{"title":1696,"description":1691,"authors":1697,"heroImage":1692,"date":1699,"body":1700,"category":741,"tags":1701},"Breaking down CI/CD complexity with parent-child and multi-project pipelines",[1698],"Fabio Pitino","2022-02-22","Software requirements change over time. 
Customers request more features and\nthe application needs to scale well\n\nto meet user demands. As software grows in size, so does its complexity, to\nthe point where we might decide that it's\n\ntime to split the project up into smaller, cohesive components.\n\n\nAs we proceed to tackle this complexity we want to ensure that our CI/CD\npipelines continue to validate\n\nthat all the pieces work correctly together.\n\n\nThere are two typical paths to splitting up software projects:\n\n\n- **Isolating independent modules within the same repository**: For example,\nseparating the UI from the backend,\n  the documentation from code, or extracting code into independent packages.\n- **Extracting code into a separate repository**: For example, extracting\nsome generic logic into a library, or creating\n  independent microservices.\n\nWhen we pick a path for splitting up the project, we should also adapt the\nCI/CD pipeline to match.\n\n\nFor the first path, [GitLab CI/CD](/topics/ci-cd/) provides [parent-child\npipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html)\nas a feature that helps manage complexity while keeping it all in a\nmonorepo.\n\n\nFor the second path, [multi-project\npipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html)\n\nare the glue that helps ensure multiple separate repositories work together.\n\n\nLet's look into how these two approaches differ, and understand how to best\nleverage them.\n\n\n## Parent-child pipelines\n\n\nIt can be challenging to maintain complex CI/CD pipeline configurations,\nespecially when you need to coordinate many jobs that may relate\n\nto different components, while at the same time keeping the pipeline\nefficient.\n\n\nLet's imagine we have an app with all code in the same repository, but split\ninto UI and backend components. A \"one-size-fits-all\" pipeline for this app\nprobably would have all the jobs grouped into common stages that cover all\nthe components. 
The default is to use `build`, `test`, and `deploy` stages.\n\nUnfortunately, this could be a source of inefficiency because the UI and\nbackend represent two separate tracks of the pipeline.\n\nThey each have their own independent requirements and structure and likely\ndon't depend on each other.\n\nThe UI might not need the `build` stage at all, but it might instead need a\n`system-test` stage with jobs that test the app end-to-end.\n\nSimilarly, the UI jobs from `system-test` might not need to wait for backend\njobs to complete.\n\n\n[Parent-child\npipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html)\nhelp here,\n\nenabling you to extract cohesive parts of the pipeline into child pipelines\nthat runs in isolation.\n\n\nWith parent-child pipelines we could break the configurations down into two\nseparate\n\ntracks by having two separate jobs trigger child pipelines:\n\n\n- The `ui` job triggers a child pipeline that runs all the UI jobs.\n\n- The `backend` job triggers a separate child pipeline that runs all the\nbackend jobs.\n\n\n```yaml\n\nui:\n  trigger:\n    include: ui/.gitlab-ci.yml\n    strategy: depend\n  rules:\n    - changes: [ui/*]\nbackend:\n  trigger:\n    include: backend/.gitlab-ci.yml\n    strategy: depend\n  rules:\n    - changes: [backend/*]\n```\n\n\nThe modifier `strategy: depend`, which is also available for multi-project\npipelines, makes the trigger job reflect the status of the\n\ndownstream (child) pipeline and waits for it to complete. Without `strategy:\ndepend` the trigger job succeeds immediately after creating the downstream\npipeline.\n\n\nNow the frontend and backend teams can manage their CI/CD configurations\nwithout impacting each other's pipelines. 
In addition to that, we can now\nexplicitly visualize the two workflows.\n\n\n![example parent-child\npipeline](https://about.gitlab.com/images/blogimages/2022-02-01-parent-child-vs-multi-project-pipelines/parent-child.png){:\n.shadow.medium.center}\n\n\nThe two pipelines run in isolation, so we can set variables or configuration\nin one without affecting the other. For example, we could use\n`rules:changes` or `workflow:rules` inside `backend/.gitlab-ci.yml`, but use\nsomething completely different in `ui/.gitlab-ci.yml`.\n\n\nChild pipelines run in the same context of the parent pipeline, which is the\ncombination of project, Git ref and commit SHA. Additionally, the child\npipeline inherits some information from the parent pipeline, including Git\npush data like `before_sha`, `target_sha`, the related merge request, etc.\n\nHaving the same context ensures that the child pipeline can safely run as a\nsub-pipeline of the parent, but be in complete isolation.\n\n\nA programming analogy to parent-child pipelines would be to break down long\nprocedural code into smaller, single-purpose functions.\n\n\n## Multi-project pipelines\n\n\nIf our app spans across different repositories, we should instead leverage\n[multi-project\npipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html).\nEach repository defines a pipeline that suits the project's needs. 
Then,\nthese standalone and independent pipelines can be chained together to create\nessentially a much bigger pipeline that ensures all the projects are\nintegrated correctly.\n\n\nThere can be endless possibilities and topologies, but let's explore a\nsimple case of asking another project\n\nto run a service for our pipeline.\n\n\nThe app is divided into multiple repositories, each hosting an independent\ncomponent of the app.\n\nWhen one of the components changes, that project's pipeline runs.\n\nIf the earlier jobs in the pipeline are successful, a final job triggers a\npipeline on a different project, which is the project responsible for\nbuilding, running smoke tests, and\n\ndeploying the whole app. If the component pipeline fails because of a bug,\nthe process is interrupted and there is no\n\nneed to trigger a pipeline for the main app project.\n\n\nThe component project's pipeline:\n\n\n```yaml\n\nbuild:\n  stage: build\n  script: ./build_component.sh\n\ntest:\n  stage: test\n  script: ./test_component.sh\n\ndeploy:\n  stage: deploy\n  trigger:\n    project: myorg/app\n    strategy: depend\n```\n\n\nThe full app project's pipeline in `myorg/app` project:\n\n\n```yaml\n\nbuild:\n  stage: build\n  script: ./build_app.sh  # build all components\n\nqa-test:\n  stage: test\n  script: ./qa_test.sh\n\nsmoke-test:\n  stage: test\n  script: ./smoke_test.sh\n\ndeploy:\n  stage: deploy\n  script: ./deploy_app.sh\n```\n\n\n![example multi-project\npipeline](https://about.gitlab.com/images/blogimages/2022-02-01-parent-child-vs-multi-project-pipelines/multi-project.png){:\n.shadow.center}\n\n\nIn our example, the component pipeline (upstream) triggers a downstream\nmulti-project pipeline to perform a service:\n\nverify the components work together, then deploy the whole app.\n\n\nA programming analogy to multi-project pipelines would be like calling an\nexternal component or function to\n\neither receive a service (using `strategy:depend`) or to notify it that an\nevent 
occurred (without `strategy:depend`).\n\n\n## Key differences between parent-child and multi-project pipelines\n\n\nAs seen above, the most obvious difference between parent-child and\nmulti-project pipelines is the project\n\nwhere the pipelines run, but there are are other differences to be aware of.\n\n\nContext:\n\n\n- Parent-child pipelines run on the same context: same project, ref, and\ncommit SHA.\n\n- Multi-project pipelines run on completely separate contexts. The upstream\nmulti-project pipeline can indicate [a ref to\nuse](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html),\nwhich can indicate what version of the pipeline to trigger.\n\n\nControl:\n\n\n- A parent pipeline _generates_ a child pipeline, and the parent can have a\nhigh degree of control over what the child pipeline\n  runs. The parent can even [dynamically generate configurations for child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html).\n- An upstream pipeline _triggers_ a downstream multi-project pipeline. The\nupstream (triggering) pipeline does not have much control over the structure\nof the downstream (triggered) pipeline.\n  The upstream project treats the downstream pipeline as a black box.\n  It can only choose the ref to use and pass some variables downstream.\n\nSide-effects:\n\n\n- The final status of a parent pipeline, like other normal pipelines,\naffects the status of the ref the pipeline runs against. For example, if a\nparent pipeline fails on the `main` branch, we say that `main` is broken.\n  The status of a ref is used in various scenarios, including [downloading artifacts](https://docs.gitlab.com/ee/api/job_artifacts.html#download-the-artifacts-archive) from the latest successful pipeline.\n\n  Child pipelines, on the other hand, run on behalf of the parent pipeline, and they don't directly affect the ref status. 
If triggered using `strategy: depend`, a child pipeline affects the status of the parent pipeline.\n  In turn, the parent pipeline can be configured to fail or succeed based on `allow_failure:` configuration on the job triggering the child pipeline.\n- A multi-project downstream pipeline may affect the status of the upstream\npipeline if triggered using `strategy: depend`,\n  but each downstream pipeline affects the status of the ref in the project they run.\n- Parent and child pipelines that are still running are all automatically\ncanceled if interruptible when a new pipeline is created for the same ref.\n\n- Multi-project downstream pipelines are not automatically canceled when a\nnew upstream pipeline runs for the same ref. The auto-cancelation feature\nonly works within the same project.\n  Downstream multi-project pipelines are considered \"external logic\". They can only be auto-canceled when configured to be interruptible\n  and a new pipeline is triggered for the same ref on the downstream project (not the upstream project).\n\nVisibility:\n\n\n- Child pipelines are not directly visible in the pipelines index page\nbecause they are considered internal\n  sub-components of the parent pipeline. 
This is to enforce the fact that child pipelines are not standalone and they are considered sub-components of the parent pipeline.\n  Child pipelines are discoverable only through their parent pipeline page.\n- Multi-project pipelines are standalone pipelines because they are normal\npipelines, but just happen to be triggered by an another project's pipeline.\nThey are all visible in the pipeline index page.\n\n\n## Conclusions\n\n\nParent-child pipelines inherit a lot of the design from multi-project\npipelines, but parent-child pipelines have differences that make them a very\nunique type\n\nof pipeline relationship.\n\n\nSome of the parent-child pipelines work we at GitLab will be focusing on is\nabout surfacing job reports generated in child pipelines as merge request\nwidgets,\n\ncascading cancelation and removal of pipelines as well as passing variables\nacross related pipelines.\n\nSome of the parent-child pipeline work we at GitLab plan to focus on relates\nto:\n\n\n- Surfacing job reports generated in child pipelines in merge request\nwidgets.\n\n- Cascading cancelation down to child pipelines.\n\n- Cascading removal down to child pipelines.\n\n- Passing variables across related pipelines.\n\n\nYou can check [this\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/336884) for planned\nfuture developments on parent-child and multi-project pipelines.\n\nLeave feedback or let us know how we can help.\n\n\nCover image by [Ravi\nRoshan](https://unsplash.com/@ravi_roshan_inc?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n\n{: .note}\n",[721,9,917],{"slug":1703,"featured":6,"template":701},"parent-child-vs-multi-project-pipelines","content:en-us:blog:parent-child-vs-multi-project-pipelines.yml","Parent Child Vs Multi Project 
Pipelines","en-us/blog/parent-child-vs-multi-project-pipelines.yml","en-us/blog/parent-child-vs-multi-project-pipelines",{"_path":1709,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1710,"content":1716,"config":1721,"_id":1723,"_type":14,"title":1724,"_source":16,"_file":1725,"_stem":1726,"_extension":19},"/en-us/blog/pipeline-editor-overview",{"title":1711,"description":1712,"ogTitle":1711,"ogDescription":1712,"noIndex":6,"ogImage":1713,"ogUrl":1714,"ogSiteName":686,"ogType":687,"canonicalUrls":1714,"schema":1715},"Meet Pipeline Editor, your one-stop shop for building a CI/CD pipeline","The Pipeline Editor reduces the complexity of configuring your CI/CD pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665961/Blog/Hero%20Images/image_cover.jpg","https://about.gitlab.com/blog/pipeline-editor-overview","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet Pipeline Editor, your one-stop shop for building a CI/CD pipeline\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2021-02-22\",\n      }",{"title":1711,"description":1712,"authors":1717,"heroImage":1713,"date":1718,"body":1719,"category":741,"tags":1720},[1324],"2021-02-22","This blog post was originally published on the GitLab Unfiltered blog. It\nwas reviewed and republished on 2021-03-02.\n\n{: .note .alert-info .text-center}\n\n\nIn GitLab 13.8, we introduced the first iteration of the [Pipeline\nEditor](/releases/2021/01/22/gitlab-13-8-released/): a dedicated editor\ndesigned for authoring your CI/CD. It is your one-stop shop for everything\nyou need to configure your CI/CD pipelines.\n\n\n## Why do we need a dedicated editor for pipelines?\n\n\nGitLab's advanced syntax provides a high degree of customization for\nsophisticated and demanding CI/CD use cases. 
However, all of this power and\nflexibility comes with a fair bit of complexity. The Pipeline Editor helps\nyou mitigate this challenge and serves as a single solution that groups all\nexisting CI authoring features in a single location. It is our foundation,\nand we plan to build on it with enhancements in future iterations. \n\n\n## Getting started\n\n\nIn order for the pipeline editor to work, you'll first need to create a\n`.gitlab-ci.yml` file in your project. The `.gitlab-ci.yml` is a [YAML\nfile](https://en.wikipedia.org/wiki/YAML) where you configure specific\nGitLab CI/CD instructions. Check out how we are working on [improving the\nfirst-time experience of creating a `.gilab-ci.yml` file directly from the\nPipeline Editor](https://gitlab.com/groups/gitlab-org/-/epics/5276). \n\n\n### Continuous validation\n\nOnce you have created the `.gitlab-ci.yml` file and navigated to it in the\nPipeline Editor, you can begin editing your configuration. Writing YAML can\nbe error prone. No matter how technical or skilled you are, programming\nmistakes happen. Sometimes an indentation will be missed, the incorrect\nsyntax is used, or the wrong keyword is selected, and that's OK! As you\nstart authoring your pipeline, GitLab will inspect the pipeline\nconfiguration using our linting APIs and provide you with an indicator of\nwhether your pipeline configuration is valid or not. We will continuously\nvalidate your pipeline without making any changes to your pipeline\nconfiguration, so you can have confidence in hitting \"merge\" and running\nyour pipeline without any surprises. 
\n\n\n![Continuous validation of\npipelines](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image1.png){:\n.shadow.medium.center}\n\nContinuous validation of your pipelines\n\n{: .note.text-center}\n\n\n### Pipeline visualizer: Seeing is believing\n\nIt's practically impossible to envision what a pipeline should look like\nwhen you start writing from a blank YAML file. Luckily, GitLab provides you\nwith a full pipeline view for every running pipeline. But, what if you want\nto visualize your pipeline _before_ they begin to run? Well, you can do that\nnow by navigating to the \"Visualize\" tab in the Pipeline Editor. You'll find\nan illustration that shows how your pipeline should look as you write it,\nsimilar to the linter, and GitLab will display the visual before making any\ncommits, before running, or before altering your pipeline in any way.\n\n\nIn the visualization, we will group all your defined pipeline jobs by stages\nand add links between the jobs based on the\n[needs](https://docs.gitlab.com/ee/ci/yaml/#needs) relationships you've\nconfigured.\n\n\nIf we take a look at the example below, you can easily see that I've\nconfigured a three-stage pipeline, where the build stage has three jobs\n(step 1-3), and that step 4 needs steps 1 and 3.\n\n\n![Pipeline editor\noverview](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image2.png){:\n.shadow.medium.center}\n\nPipeline visualizer\n\n{: .note.text-center}\n\n\nHere is what the YAML looks like:\n\n ```yaml\nimage: alpine:latest\n\n\nstages:\n   - test\n   - build\n   - deploy\n\nprepare:\n   script: exit 0\n   stage: test\n\nstep1:\n   script: echo testo\n   stage: build\nstep2:\n   script: echo testo\n   stage: build\nstep3:\n   script: echo testo\n   stage: build\n\nstep4:\n   needs: ['step1', 'step3']\n   script: exit 0\n   stage: deploy\n ```\n\n### View an expanded version of the CI/CD configuration\n\nWhen configuring pipelines, you use 
keywords like 'include' and 'extends'\noften. These keywords help break down one long pipeline configuration file\ninto multiple files, which increases readability and reduces duplication.\nUnfortunately, those keywords can make a pipeline configuration hard to\nfollow. In some configurations, a pipeline configuration file can be mostly\ncomposed of a list of other included configuration files.\n\n\nTo make the configuration easier to follow, we've added the ability to view\na version of your pipeline configuration with all of the 'includes' and\n'extends' configurations merged together as a fourth tab in the Pipeline\nEditor. Now it's much easier to understand more complex pipeline flows and\nthis simplifies the debugging process.\n\n\nPipeline configuration example:\n\n\n![pipeline\nconfiguration](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image6.png){:\n.shadow.medium.center}\n\n\nThe expanded version of the pipeline configuration:\n\n\n![expanded pipeline\nconfiguration](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image7.png){:\n.shadow.medium.center}\n\n\n### Lint\n\n\nThe CI lint helps you validate your pipeline configuration and provides you\nwith additional information about it. That's why we've copied the existing\nCI linter (which was well hidden in our jobs page) to the Pipeline Editor as\na third tab.\n\n\nThe linter provides you with detailed information about every job you've\nconfigured in your pipeline. 
For each job, it provides the\n[before_script](https://docs.gitlab.com/ee/ci/yaml/#before_script),\n[after_script](https://docs.gitlab.com/ee/ci/yaml/#after_script), and\n[script](https://docs.gitlab.com/ee/ci/yaml/#script) fields, tags,\nenvironment names, branches it should run, and more…\n\n\nIf you look at the following example, just by looking at the linter tab\nyou'll know that the `prepare` job:\n\n* Runs in the `prepare` stage\n\n* Contains `before_script`, `script`, and `after_scripts` fields \n\n* Runs only on master \n\n* Runs upon failure\n\n* Tag as production\n\n* Has the environment set to production \n\n\n![image3](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image3.png){:\n.shadow.medium.center}\n\n\nIn this second example, you can see that the build job is a manual job that\nruns on all branches and is allowed to fail:\n\n\n![Manual build\njob](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image5.png){:\n.shadow.medium.center}\n\n\n## How the Pipeline Editor came about\n\n\nEarlier this year, we decided to split continuous integration into two\nseparate teams: [Continuous\nIntegration](/direction/verify/continuous_integration/), which is\nresponsible for improving the experience of running a CI/CD pipeline, and\n[Pipeline Authoring](/direction/verify/pipeline_composition/), responsible\nfor helping you author your pipeline. We've defined the Pipeline Authoring\nteam goal as, \"Making the authoring experience as easy as possible for both\nadvanced and novice users.\"\n\n\n![Verify\nGroups](https://about.gitlab.com/images/handbook/engineering/verify/verify_groups_banner.jpg){:\n.shadow.center}\n\n\nAs a team, we realized that a dedicated authoring area is needed to achieve\nour [ambitious roadmap](https://youtu.be/hInM7JUEH4Y) – this is when the\nPipeline Editor idea was formed. \n\n\n## Try out Pipeline Editor yourself\n\n\nThat's it! I hope you found this overview useful. 
To get started with GitLab\nCI, you can [try out our hosted GitLab.com solution](/free-trial/), or you\ncan [download GitLab Self-Managed](/free-trial/) and read its documentation\nfor more in-depth coverage of the functionality. \n\n\nIf you are using our Pipeline Editor, we would love it if you leave us a\nnote on our [feedback\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/298928)! If you'd like\nto learn more about the upcoming features, feel free to read through the\n[Pipeline Editor second iteration\nepic](https://gitlab.com/groups/gitlab-org/-/epics/4814), and tag\n`@dhershkovitch` if you have any questions.\n",[721,9,765,917],{"slug":1722,"featured":6,"template":701},"pipeline-editor-overview","content:en-us:blog:pipeline-editor-overview.yml","Pipeline Editor Overview","en-us/blog/pipeline-editor-overview.yml","en-us/blog/pipeline-editor-overview",{"_path":1728,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1729,"content":1735,"config":1743,"_id":1745,"_type":14,"title":1746,"_source":16,"_file":1747,"_stem":1748,"_extension":19},"/en-us/blog/production-grade-infra-devsecops-with-five-minute-production",{"title":1730,"description":1731,"ogTitle":1730,"ogDescription":1731,"noIndex":6,"ogImage":1732,"ogUrl":1733,"ogSiteName":686,"ogType":687,"canonicalUrls":1733,"schema":1734},"GitOps & DevSecOps for production infrastructure in minutes","Unlock production-grade infrastructure and development workflows in under five minutes with Five Minute Production App: a blend of solutions offered by AWS, Hashicorp Terraform, and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665839/Blog/Hero%20Images/devops.png","https://about.gitlab.com/blog/production-grade-infra-devsecops-with-five-minute-production","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Production-grade infrastructure, GitOps convergence, and DevSecOps in under 5 minutes\",\n        
\"author\": [{\"@type\":\"Person\",\"name\":\"Sri Rangan\"}],\n        \"datePublished\": \"2021-02-24\",\n      }",{"title":1736,"description":1731,"authors":1737,"heroImage":1732,"date":1739,"body":1740,"category":741,"tags":1741},"Production-grade infrastructure, GitOps convergence, and DevSecOps in under 5 minutes",[1738],"Sri Rangan","2021-02-24","This blog post was originally published on the GitLab Unfiltered\nblog. It was reviewed and republished on\n2021-03-10.\n\n{: .note .alert-info .text-center}\n\n\nThis is a story about achieving production-grade infrastructure in under\nfive minutes.\\\\\n\nThis is a story about achieving production-grade DevSecOps in under five\nminutes.\\\\\n\nThis is a story about achieving total convergence of GitOps in under five\nminutes.\n\n\nMy name is Sri and over the last three months and I worked closely with\nGitLab co-founder [DZ](/company/team/#dzaporozhets) in building \"Five Minute\nProduction App.\"\n\n\nThe app blends solutions offered by AWS, Hashicorp Terraform, and GitLab,\nand offers production-grade infrastructure and development workflows in\nunder five minutes.\n\n\n![Five Minute Production App\nDiagram](https://about.gitlab.com/images/blogimages/five-min-prod-01-complete-flow.png){:\n.shadow.medium.center}\n\n\nApart from the efficiencies gained from using Five Minute Production App,\nyou benefit by achieving stateful, production-ready infrastructure on the\nAWS hypercloud.\n\n\nWe started with AWS first, as it is the hypercoud leader today. Support for\nAzure and Google Cloud is on the roadmap.\n\n\nOur vision and design decisions are explained in the\n[README](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#quickly).\n\n\n## Quickstart \n\n\nWe start with your GitLab project which has the source code of your web\napplication. 
Regardless of which language or framework you use, your web\napplication is packaged as a container image and stored within your GitLab\nproject's Container Registry.\n\nThis is the Build stage.\n\n\nThis is followed by the Provision stage where Terraform scripts connect to\nAWS and create a secure environment for your web application.\n\nThe environments provisioned relate to your Git branching workflow.\n\nLong-lived Git branches create long-lived environments, and short-lived Git\nbranches correspond to short-lived environments.\n\n\nResources provisioned include an Ubuntu VM, scalable PostgreSQL database, a\nRedis cluster, and S3 object storage.\n\nWe consider these elements as the building blocks for majority of web\napplications, and many of these fall under AWS free tier.\n\n\nThe infra state and credentials are stored within your GitLab project's\nmanaged Terraform state.\n\n\nFinally, we reach the Deploy stage which:\n\n1. Retrieves the deployable image from the GitLab Container Registry\n\n1. Retrieves the infrastructure credentials from the Gitlab Managed\nTerraform State, and\n\n1. Proceeds to deploy your web application\n\n\nEverything is achieved by including these two lines in your `.gitlab-ci.yml`\nfile.\n\n\n```yaml\n\ninclude:\n  remote: https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/raw/stable/deploy.yml\n```\n\n\nLet's look at the complete process in more detail.\n\n\n![Three stages of Five Minute Production\nApp](https://about.gitlab.com/images/blogimages/five-min-prod-02-pipeline.png){:\n.shadow.medium.center}\n\nThe three stages of Five Minute Production App\n\n{: .note.text-center}\n\n\n## Build and package\n\n\nThe Build stage is where it all begins. Five Minute Production App reuses\nthe [Auto Build\nstage](https://docs.gitlab.com/ee/topics/autodevops/stages.html#auto-build)\nfrom the GitLab Auto DevOps pipeline.\n\n\nAuto Build builds and packages web applications that are:\n\n1. 
Containerized with a Dockerfile, or\n\n2. Compatible with the Cloud Native buildpack, or\n\n3. Compatible with the Heroku buildpack\n\n\nThus, web applications across multitudes of technologies are supported,\nincluding web frameworks such as Rails, Django, Express, Next.js, Spring,\netc.\n\nand programming languages including Python, Java, Node.js, Ruby, Clojure,\netc.\n\n\nOnce the Auto Build job has finished execution, the newly created container\nimage is stored as an artifact in your GitLab project's Container Registry.\n\n\n## Provision the infrastructure\n\n\nThe next step, Provision, prepares infrastructure resources in AWS.\n\nThe first requirement here is the presence of AWS credentials stored as\nCI/CD variables at the project or group level.\n\nOnce valid AWS credentials are found, a Terraform script is executed to\ngenerate resources in AWS.\n\n\nThese resources include:\n\n1. EC2 VM based on Ubuntu 20.04 LTS\n\n2. PostgreSQL database managed by AWS RDS\n\n3. Redis cluster managed by AWS ElastiCache\n\n4. S3 bucket for file storage\n\n5. Email Service credentials managed by AWS SES\n\n\nThe most critical resource is the PostgreSQL service which has daily backups\nenabled.\n\nPostgreSQL data is snapshotted if the infrastructure resource is \"destroyed\"\nthrough a manual user action via the Five Minute Production App pipeline.\n\n\nThe EC2 VM is the only service accessible publicly. 
Ports 22, 80 and 443 are\nexposed.\n\nEvery other resource described above is part of a secure, private network,\nhidden from the public web, accessible ony via the EC2 instance and your web\napplicable deployed there.\n\n\nThe stateful services and environments are tied to your Git branches.\\\\\n\nThis means every Git branch creates a new environment with these resource\nsets.\\\\\n\nWe don't have a preference on your Git branching and environments\nlifecycle.\\\\\n\nUse long-lived or short-lived branches as you see fit, just keep in mind\nthat long-lived branches leads to long-lived environments and short-lived\nbranches leads to short-lived environments.\n\n\n![Infrastructure resources provisioned on\nAWS](https://about.gitlab.com/images/blogimages/five-min-prod-03-infra-resources.png){:\n.shadow.medium.center}\n\nInfrastructure resources provisioned on AWS\n\n{: .note.text-center}\n\n\n## Deploy your web application\n\n\nFinally comes the Deploy stage.\n\n\nThis is where the deploy script retrieves your web application package\n(container image) from the GitLab Container Registry, then retrieves the EC2\ninstance\n\ncredentials from the GitLab Managed Terraform State, and proceeds to deploy\nthe relevant version of your web application in its environment.\n\n\nModern web applications might require additional commands being executed\nafter each deployment or after the initial deployment,\n\nand these commands can be defined as variables in your `.gitlab-ci.yml`\nfile.\n\n\nFinally, with the help of Certbot from Letsencrypt, SSL certificates are\ngenerated and configured for your web application.\n\nIf you have defined the `CERT_DOMAIN` CI/CD variable the SSL certificate\nwill be generated for your custom domain name.\n\nOtherwise the generated SSL certificate uses a dynamic URL that Five Minute\nProduction App prepares for you.\n\n\n## Conclusion\n\n\nThere we have it. A simple yet production-ready setup for your web\napplication. 
If you are looking for an AWS-based setup, this is ready for\nusage.\n\n\nIf you are looking for something similar but not quite Five Minute\nProduction App, this serves as an example of how to converge\ninfrastructure-as-code with software development and provide seamless\ncontinuous deployment workflows.\n\n\nIn my personal experience, this is one of the most complete examples of\nGitOps:\n\n\n1. Your application source code lives in your GitLab project\n\n2. Your infrastructure defined as code lives in your GitLab project\n\n3. Your CI/CD pipeline lives in your GitLab project\n\n4. Your infrastructure state lives in your GitLab project\n\n5. Your infrastructure secrets and credentials live in your GitLab project\n\n6. Your environments configuration lives in your GitLab project\n\n\nThis complete GitOps convergence is not specifically configured for one\nproject. It can be included as a template from multiple projects.\n\nThere is no reason why the GitLab project in your organization cannot be the\nsingle source of truth for everything.\n\n\n### Links\n\n\n- [Five Minute Production\nApp](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/blob/master/README.md)\n\n- [Reference\nExamples](https://gitlab.com/gitlab-org/5-minute-production-app/examples)\n\n\n### About the author\n\n\n[Sri Rangan](mailto:sri@gitlab.com), an Enterprise Solutions Architect with\nGitLab, is a core-contributor and maintainer\n\nof [Five Minute Production\nApp](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/blob/master/README.md).\n",[721,9,765,1001,550,1742],"production",{"slug":1744,"featured":6,"template":701},"production-grade-infra-devsecops-with-five-minute-production","content:en-us:blog:production-grade-infra-devsecops-with-five-minute-production.yml","Production Grade Infra Devsecops With Five Minute 
Production","en-us/blog/production-grade-infra-devsecops-with-five-minute-production.yml","en-us/blog/production-grade-infra-devsecops-with-five-minute-production",{"_path":1750,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1751,"content":1757,"config":1763,"_id":1765,"_type":14,"title":1766,"_source":16,"_file":1767,"_stem":1768,"_extension":19},"/en-us/blog/public-project-minute-limits",{"title":1752,"description":1753,"ogTitle":1752,"ogDescription":1753,"noIndex":6,"ogImage":1754,"ogUrl":1755,"ogSiteName":686,"ogType":687,"canonicalUrls":1755,"schema":1756},"Changes to GitLab.com public project CI/CD minute quotas","How cryptomining has shaped our pipeline consumption visibility approach and our forward-looking changes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666275/Blog/Hero%20Images/ci_minutes.jpg","https://about.gitlab.com/blog/public-project-minute-limits","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Changes to GitLab.com public project CI/CD minute quotas\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jackie Porter\"}],\n        \"datePublished\": \"2021-11-11\",\n      }",{"title":1752,"description":1753,"authors":1758,"heroImage":1754,"date":1760,"body":1761,"category":808,"tags":1762},[1759],"Jackie Porter","2021-11-11","\nIn the upcoming milestones, we will be extending CI/CD minute usage quotas to GitLab.com public projects not part of GitLab [open source programs](/handbook/marketing/developer-relations/community-programs/opensource-program/).\n\nIn the first half of 2021, GitLab.com and other CI/CD providers faced a large uptick in the abuse of free pipeline minutes to mine for cryptocurrencies. To discourage and reduce abuse, we [annouced](/blog/prevent-crypto-mining-abuse/) several changes to help ensure service continued to be reliable for our customers and users. 
The most recent change was made on 2021-07-17 to \"include public projects in pipeline minutes quota for free users.\"\n\n### Why add a usage quota to public projects?\n\nPreviously, GitLab provided public projects on GitLab.com with a very high number of shared runner minutes to facilitate community contributions to open source. This means cryptominers are able to use GitLab.com shared runners to process large quantities of pipelines and consume an inordinate amount of minutes. These behaviors negatively impact the performance and the availability of GitLab.com shared runners. By adding a quota to the pipelines, the abusers are no longer able to process and consume minutes because there is a limit in place.\n\n[CI/CD minute quotas](https://docs.gitlab.com/ee/subscriptions/gitlab_com/index.html#ci-pipeline-minutes) enable CI/CD minute accumulation, which also gives you transparency into pipeline minute billing. Accumulation of CI/CD minutes in GitLab empowers you to make informed decisions about how to optimize your pipelines and CI/CD usage.\n\n### How does this usage quota impact you?\n\n* **Self-managed users** are not impacted by these changes as CI/CD minutes are only relevant for GitLab.com users.\n\n* **Members of GitLab's [open source program](/handbook/marketing/developer-relations/community-programs/opensource-program/)** [are not subject to the new quotas](https://gitlab.com/groups/gitlab-org/-/epics/6895) for public project CI/CD minutes. 
As noted in the [program description](https://about.gitlab.com/solutions/open-source/), `Features of [GitLab Ultimate](/pricing/ultimate/), including 50,000 CI/CD minutes, are free to qualifying open source projects through the GitLab for Open Source Program.`, calculated at a [program-specific cost factor](https://docs.gitlab.com/ee/ci/pipelines/cicd_minutes.html#cost-factor).\n\n* **All other GitLab.com public project users** (who account for 5% of our usage) will receive a notification email when they reach their CI/CD quota. Namespace owners will then have the option of upgrading the account to a higher [plan](https://about.gitlab.com/pricing/) or [purchasing additional CI/CD minutes](https://docs.gitlab.com/ee/subscriptions/gitlab_com/index.html#purchase-additional-ci-minutes). Self-managed runners can still be used even when a project reaches quota limits.\n\nFor more information on CI/CD minutes and billing, please refer to the [customer FAQ](/pricing/faq-compute-minutes/).\n\n### What's Next?\n\nTo further protect GitLab.com from cryptomining abuse, in the next few months you'll notice some changes to GitLab.com [CI/CD minute quotas](https://docs.gitlab.com/ee/subscriptions/gitlab_com/index.html#ci-pipeline-minutes) and the types of projects that accumulate pipeline minutes.\n\nTo address your questions and feedback on these changes going forward, we have created a space in the [GitLab Community Forum](https://forum.gitlab.com/t/pipeline-minute-quotas-on-gitlab-com/58976), which is actively monitored by GitLab team members and product managers involved with this change.\n",[721,9,697],{"slug":1764,"featured":6,"template":701},"public-project-minute-limits","content:en-us:blog:public-project-minute-limits.yml","Public Project Minute 
Limits","en-us/blog/public-project-minute-limits.yml","en-us/blog/public-project-minute-limits",{"_path":1770,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1771,"content":1777,"config":1784,"_id":1786,"_type":14,"title":1787,"_source":16,"_file":1788,"_stem":1789,"_extension":19},"/en-us/blog/second-law-of-complexity-dynamics",{"title":1772,"description":1773,"ogTitle":1772,"ogDescription":1773,"noIndex":6,"ogImage":1774,"ogUrl":1775,"ogSiteName":686,"ogType":687,"canonicalUrls":1775,"schema":1776},"How pursuit of simplicity complicates container-based CI","Simplicity always has a certain player in mind - learn how to avoid antipatterns by ensuring simplicity themes do not compromise your productivity by over-focusing on machine efficiencies.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/second-law-of-complexity-dynamics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"When the pursuit of simplicity creates complexity in container-based CI pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-05-24\",\n      }",{"title":1778,"description":1773,"authors":1779,"heroImage":1774,"date":1780,"body":1781,"category":741,"tags":1782},"When the pursuit of simplicity creates complexity in container-based CI pipelines",[716],"2022-05-24","\n\nIn a GitLab book club, I recently read \"[The Laws of Simplicity](http://lawsofsimplicity.com/),\" a great book on a topic that has deeply fascinated me for many years. 
The book contains an acronym that expresses simplicity generation approaches: SHE, which stands for \"shrink, hide, embody.\" These three approaches for simplicity generation all share a common attribute: They are all creating illusions - not eliminations.\n\nI've seen this illusion repeat across many, many realms of pursuit for many years. Even in human language, vocabulary development, jargon, and acronyms all simply encapsulate worlds of complexity that still exist, but can be more easily referenced in a compact form that performs SHE on the world of concepts.\n\nAny illusion has a boundary or curtain where in front of the curtain the complexity can be dealt with by following simple rules, but, behind the curtain, the complexity must be managed by a stage manager. \n\nFor instance, when the magic show creates the spectre of sawing people in half, what appears to be a simple box is in fact an exceedingly elaborate contraption. Not only that, but the manufacturing process for an actual simple box and the sawing box are markedly different in terms of complexity. The manufacturing of complexity and its result are essentially the tradeoff for what would be the real-world complexity of actually sawing people in half and having them heal and stand up unharmed immediately afterward.\n\nTo bring this into the technical skills realm, consider that when you leverage a third-party component or API to add functionality, you only need to know the parameters to obtain the desired result. The people maintaining that component or API must know the quantum mechanics detail level of how to perform that work in a reliable and complete way.\n\nDocker containers are a mechanism for embodying complexity, and are used in scaled applications and within container-based CI. 
When a [CI/CD](/topics/ci-cd/) automation engineer uses container-based CI, it is possible to make things more complex and more expensive when attempting to do exactly the opposite.\n\nAt its core, this post is concerned with how it can happen that pursuing a simpler world through containers can turn into an antipattern - a reversal of desired outcomes - many times, without us noticing that the reversal is affecting our productivity. The prison of a paradigm is secure indeed.\n\n### The Second Law of Complexity Dynamics\n\nOver the years I have come to believe that the pursuit of reducing complexity has similar characteristics to [The Second Law of Thermodynamics](https://www.grc.nasa.gov/www/k-12/airplane/thermo2.html). The net result of a change between mass and energy results in the the same net amount of mass and energy, but their ratio and form have changed. In what I will coin \"The Second Law of Complexity Dynamics,\" complexity is similarly \"conserved,\" it is just reformed.\n\nIf complexity is not eliminated by simplifying efforts, we reduce its impact in a given realm by changing the ratio of complexity and simplicity on each side of one or more curtains. But alas, complexity did not die, it just hid and is now someone else's management challenge. It is important not to think of this as cheating. There is no question that hiding complexity carries the potential for massive efficiency gains when the world behind the hiding mechanisms becomes the realm of specialty skills and specialists. When it truly externalizes the complexity management for one party, the world becomes more simple for that party.\n\nHowever, the devil is in the details. If the hypothesis of \"no net elimination of complexity\" is correct, it is then important where the complexity migrates to. If it migrates to another part of the same process that must also be managed by the same people, then it may not result in a net gain of efficiency. 
If it migrates out of a previously embodied realm, then, in the pusuit of simplicity, we can actually reduce our overall efficiency when the process is considered as a whole.\n\n### Container-based CI pipelines as a useful case in point\n\nI see the potential for efficiency reversals to crop up in my daily work time and again, and an interesting place where I've seen it lately is in the tradeoff of linking together hyper-specialized modules of code in containers for CI versus leveraging more generalized modules.\n\nIn creating container-based pipelines, I experience the potential for an efficiency reversal I have to consciously manage.\n\nContainers make a simplicity tradeoff by design. They create a full runtime environment for a very single purpose but in doing so they strip back the container internals so far that general compute tasks are difficult inside them. If you step behind their \"complexity embodying\" curtain into the container, their simplistic environment can require more complex code to operate within.\n\nIn GitLab CI pipelines that utilize containers, all the scripts of jobs run inside the containers that are specified as their runtime environment. When one selects a specialized container - such as the alpine git container or the skopeo image management container - the code is subject to the limitations of the shell that container employs (if it has one at all).\n\nContainers were devised to be hyper-specialized, purpose-specific runtimes that assure they can always run and run quickly for scaled applications. However, for many containers this means no shell or a very stripped back shell like busybox sh. It frequently also means not including the package manager for the underlying Linux distribution.\n\nTime and again, I've found myself degrading the implementation of my shell code in key ways that make it more complex, so that it can run under these stripped back shells. 
In these cases, I do not benefit from the complexity hiding of newer versions of advanced shells like Bash v5. One of the areas is advanced Bash shell expansions, which embody a huge world of complex parsing and avoid a bunch of extraneous utilities. And another is advanced if and case statement comparison logic that processes regular expressions without external utilities and performs many other abstracted comparisons. There are many other areas of the language where this comes into play, but these two stand out.\n\n![](https://about.gitlab.com/images/blogimages/second-law-of-complexity-dynamics-container-pipeline-tradeoffs.png)\n\nSo by having a simpler shell like busybox sh, the simplicities of advanced shell features become *unhidden* and join my side of the curtain. Now I have to manage them in my code. But then, guess what? No package manager means the inability to install other Linux utilities and languages extensions that I could also employ to push that same complexity back out of my space. And, of course, it means installing Bash v5 would be difficult as well.\n\nSo the simplicity proposition of a tightly optimized purpose-specific container can reverse the purported efficiency gains in the very important realm of the code I have to write. It also means I frequently have to break up my code into multiple jobs to utilize the specializations of these containers in a sequence or to transport the results of a specialized container into a fuller coding environment. 
This increases the complexity of the pipeline as I now have to pass artifacts and variable data from one job to another with a host of additional YAML directives, and sometimes deploy infrastructure (e.g., [Runner caching](https://docs.gitlab.com/ee/ci/caching/#:~:text=For%20runners%20to%20work%20with,GitLab.com%20behave%20this%20way)).\n\nIn the case of CI using containers, when the simplicity tradeoffs move complexity to things I do not maintain, such as base containers, operating system packages, and full shell environments, into things I do maintain, such as CI YAML and Shell Script code, then I am also inheriting long-term complexity maintenance. In the cloud, we know this as undifferentiated heavy lifting.\n\nInterestingly, the proliferation of specialized containers can also require more machine resources and can lengthen processing time as containers are retrieved from registries and loaded and artifacts and source code are copied in and out of each job-based container.\n\n### Simplicity target: Efficiency\n\nIt's easy to lose sight of the amount of human effort and ingenuity being applied to knowing and managing the coding structure, rather than being applied to solving the real automation problems of the CI pipeline. The net complexity of the pipeline can also mean it is hard to maintain an understanding of it even if you are working in it every day - and for newcomers onboarding, it can be many weeks before they fully understand how the system works.\n\nOf course, I can create my own containers for CI pipelines, but now I've added the complexity of container development and continuous updates of the same in order for my pipeline code to be operational and stay healthy. I am still behind the curtain for that container. For teams whose software is not itself containerized, the prospect of learning to build containers just for CI can create a lot of understandable friction to adopting a container-based CI development process. 
This friction may be unnecessary if we make a key heuristic adaptation.\n\n### Walking the tightwire above the curtain\n\nSo how do I manage the tensions of these multiple worlds of complexity when it comes to container-based pipelines to try to avoid efficiency reversals in the net complexity of the pipeline?\n\nIt is simple. I will describe the method and then the key misapplied heuristic and how to adjust it.\n\n1. I hold that the primary benefits of container-based CI are a) dependency isolation by job (so that you don’t have a massive and brittle CI build machine specification to handle all possible build requirements), and b) clean CI build agent state by obtaining a clean container copy for each job. These benefits do not imply having to abide by microservices container resource planning and doing so is what creates an antipattern in my productivity.\n\n2. I frequently use a Bash 5 container (version pegged if need be) where all the complexity that advanced shell capabilities embody for me stay behind the curtain.\n\n3. Instead of running a hyper-minimalized container for a given utility, I do a runtime install of that utility (gasp!) in a container that has my rich shell. I utilize version pegging during the install if I feel version safety is paramount on the utility. Alternatively, if a very desirable runtime of some type is difficult to setup and does not have a package, I look for a container that has a package manager that matches a packaged version of the runtime and also allows me to install my advanced scripting language if needed.\n\n4. If, and only if, the net time of the needed runtime installs exceeds the net pipeline time to load a string of specialized containers (with artifact handling) plus my time to develop and manage a pipeline dependency in the form of a custom container, then do I consider possibly creating a pipeline specific container.\n\n5. Through this process a balancing principle also emerges. 
Since I have been doing runtime installs as a development practice, I have actually already MVPed what a pipeline specific container would need to have installed. I can literally copy the installation lines into a Docker file if I wish. I can also notice if I have commonality across multiple pipelines where it makes sense to create a multi-pipeline utility container.\n\nIn a recent project, following these principles caused me to avoid the skopeo container and instead install it on the Bash 5 container using a package manager.\n\nIf your team is big into Python or PowerShell as your CI language, it would make sense to start with recent releases of those containers. The point is not advanced Bash -but an advanced version of your general CI scripting language that prevents you from creating work arounds in your code for problems that are well-solved in publicly available runtimes.\n\nKeep in mind that this adjustment is very, very focused on containers **in CI pipelines**, which, by nature, reflect general compute processing requirements where many vastly different operations are required in a pipeline. I am not advocating this approach for true microservices applications where, by design, a given service has very defined purpose and characteristics and, at scale, massively benefits from the machine efficiency of hyper-minimalized, purpose-specific granularity.\n\n### Misapplied heuristics\n\nFrequently when a pattern has an inflection point at which it becomes an antipattern, it is due to misapplying the heuristics of the wrong realm. In this case, I believe, that normal containerization patterns for microservices apps are well founded, but they apply narrowly to \"engineered hyper-specialized compute\" of a granule we call \"a microservice\" (note the word \"micro\" applies to the scope of compute activities). Importantly, they apply because the process itself is designed as hyper-specialized around a very specific task. 
The container contents (included dependencies), immutability principle (no runtime change), and the runtime compute resources can be managed exceedingly minimally because of the small and highly specific scope of computing activities that occur within the process.\n\nThis is essentially the embodiment of the 12 Factor App principle called “[VIII. Concurrency](https://12factor.net/concurrency),” which asserts that scaling should be horizontal scaling of the same minimalized process, not vertical scaling of compute resources inside a given process. If the system experiences 10x work for a particular activity, we create 10 processes, we do not request 10x memory and 10x CPU within one running process. Microservices architecture tightly controls the amount of work in each request so that it is hyper-predictable in its compute resource requirements and, therefore, scalable by adding identical processes.\n\nCI compute, by nature, is the opposite of hyper-specialized. Across build, test, package, deploy, etc., etc., there are many huge variations in required machine resources of memory, CPU, network I/O and high-speed disk access and, importantly, included dependencies. The generalized compute nature also occurs due to varying inputs so the same defined process might need a lot more resources due to the nature of the raw input data. For example, varying input volume (e.g. a lot versus few data items) or varying input density (e.g. processing binary files versus text files). \n\nIt is the process that is being containerized that holds the attribute of generalized compute (bursty on at least some compute resources) or hyper-specialized (narrow definition of work to be done and therefore well-known compute resources per unit of completed work). 
Containerizing a process that exhibits generalized compute requirements is useful, but planning the resources of that container as if containerizing it has transformed the compute requirements into hyper-minimalized is the inflection point at which it becomes an antipattern, actually eroding the sought-after benefits we set out to create.\n\nIn the model I employ for leveraging containers in CI, the loosening of the hyper-specialization, immutablility (no-runtime installs), and very narrow compute resources principles of microservices simply reflects the real world in that CI compute as a whole exhibits the nature of generalized, not hyper-specialized, compute characteristics.\n\n> Another realm where this seems true is desired state configuration management technologies - also known as “Configuration as Code”. It is super simple if there are pre-existing components or recipes for all that you need to do but as soon as you have to build some for yourself, you enter a world of creating imperative code against a declarative API boundary (there's the \"embodiment\" curtain - the declarative API boundary). Generally, if you have not had to implement imperative code to process declaratively, this new world takes some significant experience to become proficient.\n\n### Iterating SA: Experimental improvements for your next project\n\n1. In general, favor simplicity boundaries that reduce your work, especially in the realm of undifferentiated heavy lifting. In the realm of container-based CI, this includes having a rich coding language and a package manager to acquire additional complexity embodying utilities quickly and easily.\n\n2. In general, be suspicious of an underlying antipattern if you have to spend an inordinate amount of time coding and maintaining workarounds in the service of simplicity. 
In the realm of container-based CI, this would be containers that are ultra-minimalized around microservices performance characteristics when they don’t hyper-scale as a standing service within CI.\n\n3. In general, stand back and examine the net complexity of the code and frameworks that will have to be maintained by yourself or your team and check if you’ve made tradeoffs that have a net negative tax on your efficiency. When complexity that can be managed by machines enters your workspace at high frequency, then you have a massive antipattern of human efficiency.\n\n4. It is frequent that when the heuristics being applied create negative human efficiency they also create negative machine efficiency. Watch for this effect in your projects. The diagram in the post shows that over-minimalized containers can easily lead to using a lot more of them - all of which has machine overhead as well.\n\nIf the above resonates, CI pipeline engineers might want to consider loosening the \"microservices\" heuristics of hyper-specialization, ultra-minimalization, and immutability (no dynamic installs) for CI pipeline containers in order to ensure that the true net complexity level of the code they have to maintain is in balance and their productivity is preserved.\n\n### Appendix: Working examples of this idea\n\n- [AWS CLI Tools in Containers](https://gitlab.com/guided-explorations/aws/aws-cli-tools) has both Bash and PowerShell Core (on Linux OS) available so that one container set can suit the automation shell preference of both Linux and Windows heritage CI automation engineers.\n\n- CI file [installs yq dynamically](https://gitlab.com/guided-explorations/gl-k8s-agent/gitops/envs/world-greetings-env-1/-/blob/main/.gitlab-ci.yml#L47-48) in the Bash container, but then [only installs the heavier jq and skopeo](https://gitlab.com/guided-explorations/gl-k8s-agent/gitops/envs/world-greetings-env-1/-/blob/main/.gitlab-ci.yml#L63) if needed by the work implied, which demonstrates a 
way to be more efficient even when runtime installs are desired.\n\n- [Bash and PowerShell Script Code Libraries in Pure GitLab CI YAML](https://gitlab.com/guided-explorations/ci-cd-plugin-extensions/script-code-libraries-in-pure-gitlab-ci-yaml) shows how to have libraries of CI script code available to every container in a pipeline without encapsulating the libraries in a container themselves and with minimalized CI YAML complexity compared to YAML anchors, references, or extends. While the method is a little bit challenging to setup, from then on out it pays back by decoupling scripting libraries from any other pipeline artifact.\n\n- [CI/CD Extension Freemarker File Templating](https://gitlab.com/guided-explorations/ci-cd-plugin-extensions/ci-cd-plugin-extension-freemarker-file-templating) shows the install is very quick and only affects one job and still version pegs the installed utility.\n",[721,9,766,1783,744],"code review",{"slug":1785,"featured":6,"template":701},"second-law-of-complexity-dynamics","content:en-us:blog:second-law-of-complexity-dynamics.yml","Second Law Of Complexity Dynamics","en-us/blog/second-law-of-complexity-dynamics.yml","en-us/blog/second-law-of-complexity-dynamics",{"_path":1791,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1792,"content":1798,"config":1805,"_id":1807,"_type":14,"title":1808,"_source":16,"_file":1809,"_stem":1810,"_extension":19},"/en-us/blog/secure-journey-continuous-delivery",{"title":1793,"description":1794,"ogTitle":1793,"ogDescription":1794,"noIndex":6,"ogImage":1795,"ogUrl":1796,"ogSiteName":686,"ogType":687,"canonicalUrls":1796,"schema":1797},"Securing the journey to continuous delivery","The UK Dept for Work and Pensions bring security best practices to the forefront of a massive transition to continuous delivery.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678442/Blog/Hero%20Images/londoncommit.png","https://about.gitlab.com/blog/secure-journey-continuous-delivery","\n              
          {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Securing the journey to continuous delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-10-30\",\n      }",{"title":1793,"description":1794,"authors":1799,"heroImage":1795,"date":1801,"body":1802,"category":1447,"tags":1803},[1800],"Vanessa Wegner","2019-10-30","\n[Adam Moss](https://www.linkedin.com/in/adam-moss/?originalSubdomain=uk) is the\nHead of Engineering Strategy, Technical Leadership, DevOps, and SRE at the\nDepartment for Work and Pensions. At this year’s GitLab Commit in London, Adam\nspoke about how his organization transitioned from waterfall to Agile, and how\nthey built security into both their organization's infrastructure and culture.\n\nThe Department for Work and Pensions (DWP) is the United Kingdom’s largest\ngovernment department. It comprises 84,000 employees and serves 22 million\ncitizens, with systems containing approximately 55 million lines of code and\nseeing about 10,000 changes per year.\n\nIn other words, it’s a big deal.\n\nBut their infrastructure and operations were less than stellar. Adam and his\nteam wanted to offer 24/7 service availability, improve their user experience,\nand reduce operational costs. So, they went Agile.\n\n## Big change for big gains\n\nBefore the transformation, the DWP had outsourced services for 30 years. To get\nto [continuous delivery](/topics/continuous-delivery/), they brought everything in-house. In addition to massive\noperational change, this also required an enormous cultural shift within the\norganization. Insourcing meant taking responsibility for everything – they couldn't blame a third party should anything go wrong. 
Teams also had to take on an iterative mindset: Changing their standard maximum viable product into a minimum one.\n\nThen there was the question of tools, which also brought the question of\nsecurity: What tools would best enable developers, without leaving gaping holes\nin their systems?\n\n## Owning the risk\n\nAs a government organization, the DWP was used to managing risk – but they\nsuddenly found themselves without an outsourced partner to blame. Now that Adam’s\nteam was fully responsible for security efforts, they needed to become much\nmore risk averse. Taking ownership of security is also a big change for\ndevelopers, even for organizations not undergoing massive transformation.\n\n### The journey to DevOps security\n\n#### Considerations\n\nTo keep both processes and systems secure, the DWP took a multi-layered\napproach with people, devices, and code among the top aspects considered.\n\nDevelopers are often highly privileged users, which poses certain risks to your\nenvironment. While it’s necessary to protect both systems and people,\norganizations need to be clear about their security policies and intent in\norder to build and maintain employee trust. Adam puts it this way: Think about\ndisciplinary policies – if a piece of vulnerable code is released and causes a\nproblem, is it the individual’s fault? Or is it a fault of the processes you’ve\nput in place?\n\nAdam also emphasized that restrictions might not be the best answer: Developers\nwill find a way around, so it’s better to implement something that allows\nthem to achieve their objectives without creating any backdoor processes.\n\nThere was also the consideration of open source – while it provides great\nbenefits, there are challenges that must also be managed appropriately. Adam’s\nteam chose to implement continuous vulnerability monitoring (with [GitLab](/solutions/security-compliance/))\nto keep track of any risky dependencies that might spring a data leak. 
They\nalso chose to use GitLab as a central point of control and single source of\ntruth, increasing transparency for the organization.\n\n#### Lessons learned\n\nIn his presentation, Adam shared some valuable tips for a successful\ntransition to continuous delivery. Here are a few favorites:\n\n##### Automate, automate!\nAutomation will make things immensely easier – not just because of the time\nsaved, but also because of its repeatability and reduced risk for human error.\nFocus on the low-hanging fruit early on in the process. There will always be things you can’t\nautomate, so pick the easy battles first.\n\n##### Identify your pain point\nTake a look across your operations and organization. What is the biggest\nchallenge you can solve? Or, what change will bring a lot of value in the move\nto continuous delivery? Try to achieve ROI as soon as possible.\n\n##### Anticipate risks from an external POV\nAdam recommends threat modeling, and looking at security from the outside in.\nWhat might an adversary be thinking? Why and how might they attack? Some tools\nwill even generate possible situations that you’ve never considered.\n\n##### Continuous doesn’t always mean automatic\nWhile you may want to automate functions as much as possible, the catalyst can\nstill be human. 
Separation of duties can serve as a useful defense mechanism to\nensure that big changes won’t cause undue risk.\n\n## The journey doesn’t end with DevOps\n\nAdam concludes with some wisdom for the future: Always be thinking about how\nyou’re going to evolve your organization, and make sure your roadmap continues to change as well.\nHe suggests looking externally for options you might not have yet considered,\nlike the capabilities planned for [your favorite DevOps tools](/direction/#devops-stages).\n\nTo build some new ideas into your own roadmap, watch Adam’s talk from GitLab\nCommit London.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/c8zFXUkPb2c\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[787,765,1804,9,269],"agile",{"slug":1806,"featured":6,"template":701},"secure-journey-continuous-delivery","content:en-us:blog:secure-journey-continuous-delivery.yml","Secure Journey Continuous Delivery","en-us/blog/secure-journey-continuous-delivery.yml","en-us/blog/secure-journey-continuous-delivery",{"_path":1812,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1813,"content":1819,"config":1824,"_id":1826,"_type":14,"title":1827,"_source":16,"_file":1828,"_stem":1829,"_extension":19},"/en-us/blog/simple-kubernetes-management-with-gitlab",{"title":1814,"description":1815,"ogTitle":1814,"ogDescription":1815,"noIndex":6,"ogImage":1816,"ogUrl":1817,"ogSiteName":686,"ogType":687,"canonicalUrls":1817,"schema":1818},"Simple Kubernetes management with GitLab","Follow our tutorial to provision a Kubernetes cluster and manage it with IAC using Terraform and Helm in 20 minutes or less.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670037/Blog/Hero%20Images/auto-deploy-google-cloud.jpg","https://about.gitlab.com/blog/simple-kubernetes-management-with-gitlab","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Simple Kubernetes management with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2022-11-15\",\n      }",{"title":1814,"description":1815,"authors":1820,"heroImage":1816,"date":1821,"body":1822,"category":741,"tags":1823},[784],"2022-11-15","Kubernetes can be very complex and has dozens of tutorials out there on how\nto provision and manage a cluster. This tutorial aims to provide a simple,\nlightweight solution to provision a Kubernetes cluster and manage it with\ninfrastructure as code (IaC) using Terraform and Helm in 20 minutes or less.\n\n\n**The final product of this tutorial will be two IaC repositories with fully\nfunctional CI/CD pipelines:**\n\n\n1.\n[gitlab-terraform-k8s](https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-eks)\n- A single source of truth to provision, configure, and manage your\nKubernetes infrastructure using Terraform\n\n1.\n[cluster-management](https://gitlab.com/gitlab-org/project-templates/cluster-management)\n- A single source of truth to define the desired state of your Kubernetes\ncluster using the GitLab Agent for Kubernetes and Helm\n\n\n![Final\nProduct](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/final-product.png){:\n.shadow}\n\n\n\n### Prerequisites\n\n- AWS or GCP account with permissions to provision resources\n\n- GitLab account \n\n- Access to a GitLab Runner\n\n- 20 minutes\n\n\n### An overview of this tutorial is as follows:\n\n\n1. Set up the GitLab Terraform Kubernetes Template 🏗️\n\n2. Register the GitLab Agent 🕵️\n\n3. Add in Cloud Credentials ☁️🔑\n\n4. Set up the Kubernetes Cluster Management Template 🚧\n\n5. Enjoy your Kubernetes Cluster completely managed in code! 
👏\n\n\n## Set up the GitLab Terraform Kubernetes Template\n\n\nStart by importing the example project by URL -\n[https://gitlab.com/projects/new#import_project](https://gitlab.com/projects/new#import_project)\n\n\nTo import the project:\n\n\n1. In GitLab, on the top bar, select **Main menu > Projects > View all\nprojects**.\n\n2. On the right of the page, select **New project**.\n\n3. Select **Import project**.\n\n4. Select **Repository by URL**.\n\n5. For the Git repository URL:\n\n- [GCP Google Kubernetes\nEngine](https://cloud.google.com/kubernetes-engine):\nhttps://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-gke.git\n\n- [AWS Elastic Kubernetes Service](https://aws.amazon.com/eks/):\nhttps://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-eks.git\n\n6. Complete the fields and select **Create project**.\n\n\n## Register the GitLab Agent\n\n\nWith your newly created **gitlab-terraform-k8s** repo, create a GitLab Agent\nfor Kubernetes:\n\n\n1. On the left sidebar, select **Infrastructure > Kubernetes clusters**.\nSelect **Connect a cluster (agent).**\n\n2. From the **Select an agent** dropdown list, select **eks-agent/gke-agent**\nand select **Register an agent**.\n\n3. GitLab generates a registration token for the agent. **Securely store\nthis secret token, as you will need it later.**\n\n4. GitLab provides an address for the agent server (KAS). Securely store\nthis as you will also need it later.\n\n5. 
Add this to the\n**gitlab-terraform-eks/.gitlab/agents/eks-agent/config.yaml** in order to\nallow the GitLab Agent to have access to your entire group.\n\n\n```yaml\n\nci_access:\n  groups:\n    - id: your-namespace-here\n```\n\n\n![Register GitLab\nAgent](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/register-gitlab-agent.png){:\n.shadow}\n\n\n\n## Add in your Cloud Credentials to CI/CD variables\n\n\n### [AWS EKS](https://aws.amazon.com/eks/)\n\n\nOn the left sidebar, select **Settings > CI/CD. Expand Variables**.\n\n1. Set the variable **AWS_ACCESS_KEY_ID** to your AWS access key ID.\n\n2. Set the variable **AWS_SECRET_ACCESS_KEY** to your AWS secret access key.\n\n3. Set the variable **TF_VAR_agent_token** to the agent token displayed in\nthe previous task.\n\n4. Set the variable **TF_VAR_kas_address** to the agent server address\ndisplayed in the previous task.\n\n\n![Add in CI/CD\nvariables](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/cicd-variables.png){:\n.shadow}\n\n\n\n### [GCP GKE](https://cloud.google.com/kubernetes-engine)\n\n\n1. To authenticate GCP with GitLab, create a GCP service account with the\nfollowing roles: **Compute Network Viewer, Kubernetes Engine Admin, Service\nAccount User, and Service Account Admin**. Both User and Admin service\naccounts are necessary. The User role impersonates the default service\naccount when creating the node pool. The Admin role creates a service\naccount in the kube-system namespace.\n\n2. **Download the JSON file** with the service account key you created in\nthe previous step.\n\n3. On your computer, encode the JSON file to base64 (replace\n/path/to/sa-key.json to the path to your key):\n\n\n```\n\nbase64 -i /path/to/sa-key.json | tr -d\n\n```\n\n\n- Use the output of this command as the **BASE64_GOOGLE_CREDENTIALS**\nenvironment variable in the next step.\n\n\nOn the left sidebar, select **Settings > CI/CD. 
Expand Variables**.\n\n5. Set the variable **BASE64_GOOGLE_CREDENTIALS** to the base64 encoded JSON\nfile you just created.\n\n6. Set the variable **TF_VAR_gcp_project** to your GCP’s project name.\n\n7. Set the variable **TF_VAR_agent_token** to the agent token displayed in\nthe previous task.\n\n8. Set the variable **TF_VAR_kas_address** to the agent server address\ndisplayed in the previous task.\n\n\n## Run GitLab CI to deploy your Kubernetes cluster!\n\n\n![Deploy Kubernetes\ncluster](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/pipeline-view.png){:\n.shadow}\n\n\nWhen successfully completed, view the cluster in the AWS/GCP console!\n\n\n![AWS\nEKS](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/aws-eks.png){:\n.shadow}\n\n\n### You are halfway done! 👏 Keep it up!\n\n\n## Set up the Kubernetes Cluster Management Project\n\n\nCreate a project from the cluster management project template -\n[https://gitlab.com/projects/new#create_from_template](https://gitlab.com/projects/new#create_from_template)\n\n\n1. In GitLab, on the top bar, select **Main menu > Projects > View all\nprojects**.\n\n2. On the right of the page, select **New project**.\n\n3. Select **Create from template**.\n\n4. From the list of templates, next to **GitLab Cluster Management**, select\n**Use template**.\n\n5. Enter the project details. Ensure this project is created in the same\nnamespace as the gitlab-terraform-k8s project.\n\n6. Select **Create project**.\n\n7. Once the project is created on the left sidebar, select **Settings >\nCI/CD. Expand Variables**.\n\n8. Set the variable KUBE_CONTEXT to point to the GitLab Agent. 
For example,\n`noah-ing-demos/infrastructure/gitlab-terraform-eks:eks-agent`.\n\n\n![Set Kube\nContext](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/kube-config.png){:\n.shadow}\n\n\n\n- **Uncomment the applications you'd like to be installed** into your\nKubernetes cluster in the **helmfile.yaml**. In this instance I chose\ningress, cert-manager, prometheus, and Vault. \n\n\n![Uncomment Applications in\nhelmfile](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/helmfile.png){:\n.shadow}\n\n\nThat will trigger your **CI/CD pipeline** and it should look like this.\n\n\n![Cluster Management\nCI/CD](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/cluster-management-cicd.png){:\n.shadow}\n\n\nOnce completed, **go to the AWS/GCP console** and check out all the deployed\nresources!\n\n\n![Deployed EKS\napplications](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/deployed-eks-applications.png){:\n.shadow}\n\n\n### Voila! 🎉\n\n\n## Enjoy your Kubernetes cluster completely defined in code! 👏👏👏\n\n\nNow with these two repositories you can **manage a Kubernetes cluster\nentirely through code**:\n\n\n- For managing the Kubernetes cluster's infrastructure and configuring its\nresources you can make changes to the\n[gitlab-terraform-eks](https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-eks)\nrepository you have setup. 
This project has a **Terraform CI/CD pipeline**\nthat will allow you to **review, provision, configure, and manage your\nKubernetes** infrastructure with ease.\n\n\n- For managing the desired state of the Kubernetes cluster, the\n[cluster-management](https://gitlab.com/gitlab-org/project-templates/cluster-management)\nrepository has a **GitLab Agent** set up and will **deploy any Kubernetes\nobjects defined in the helm files**.\n\n\n➡️ Bonus: If you'd like to deploy your own application to the Kubernetes\ncluster, then add to your **cluster-management** `helmfile` and see the\nGitLab Agent for Kubernetes roll it out with ease!\n\n\n\n## References\n\n- [Create a New GKE\nCluster](https://docs.gitlab.com/ee/user/infrastructure/clusters/connect/new_gke_cluster.html)\n\n- [Create a New EKS\nCluster](https://docs.gitlab.com/ee/user/infrastructure/clusters/connect/new_eks_cluster.html)\n\n- [Cluster Management\nProject](https://docs.gitlab.com/ee/user/clusters/management_project.html)\n\n\n\n## Related posts\n\n- [The ultimate guide to GitOps with\nGitLab](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/)\n\n- [GitOps with GitLab: Infrastructure provisioning with GitLab and\nTerraform](https://about.gitlab.com/blog/gitops-with-gitlab-infrastructure-provisioning/)\n\n- [GitOps with GitLab: Connect with a Kubernetes\ncluster](https://about.gitlab.com/blog/gitops-with-gitlab-connecting-the-cluster/)\n",[766,959,550,721,9,765],{"slug":1825,"featured":6,"template":701},"simple-kubernetes-management-with-gitlab","content:en-us:blog:simple-kubernetes-management-with-gitlab.yml","Simple Kubernetes Management With 
Gitlab","en-us/blog/simple-kubernetes-management-with-gitlab.yml","en-us/blog/simple-kubernetes-management-with-gitlab",{"_path":1831,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1832,"content":1837,"config":1842,"_id":1844,"_type":14,"title":1845,"_source":16,"_file":1846,"_stem":1847,"_extension":19},"/en-us/blog/stageless-pipelines",{"title":1833,"description":1834,"ogTitle":1833,"ogDescription":1834,"noIndex":6,"ogImage":1299,"ogUrl":1835,"ogSiteName":686,"ogType":687,"canonicalUrls":1835,"schema":1836},"Write a stageless CI/CD pipeline using GitLab 14.2","With GitLab 14.2, you can write a complete CI/CD pipeline without defining any stages.","https://about.gitlab.com/blog/stageless-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Write a stageless CI/CD pipeline using GitLab 14.2\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2021-08-24\"\n      }",{"title":1833,"description":1834,"authors":1838,"heroImage":1299,"date":1839,"body":1840,"category":741,"tags":1841},[1324],"2021-08-24","\n\nGitLab CI/CD technology has historically divided a pipeline into stages based on the typical development workflow. Now that [GitLab 14.2 has launched](/releases/2021/08/22/gitlab-14-2-released/), users can speed up cycle times by using the [`needs`](https://docs.gitlab.com/ee/ci/yaml/#needs) command to write a complete CI/CD pipeline with every job in the single stage. 
In fact, you can omit stages completely and have a [\"stageless\" pipeline](https://about.gitlab.com/releases/2021/08/22/gitlab-14-2-released/#stageless-pipelines) that executes entirely based on the `needs` dependencies.\n\n## Understanding stages\n\nIn GitLab CI/CD, you use [stages](https://docs.gitlab.com/ee/ci/yaml/#stages) to group jobs based on the development workflow and control the order of execution for CI/CD jobs.\n\nPipelines execute each stage in order, where all jobs in a single stage run in parallel. After a stage completes, the pipeline moves on to execute the next stage and runs those jobs, and the process continues like this until the pipeline completes or a job fails. If a job fails, the jobs in later stages don't start at all.\n\n## History of stages in GitLab CI/CD\n\nWhen we first designed GitLab CI/CD, we knew that in a continuous integration workflow you build and test software every time a developer pushes code to the repository. The use of stages in GitLab CI/CD helped establish a mental model of how a pipeline will execute. By default, stages are ordered as: `build`, `test`, and `deploy` - so all stages execute in a logical order that matches a development workflow. The first step is to build the code, and if that works, the next step is to test it. If the tests pass, then you deploy the application.\n\nOf course, you can actually create as many stages as you like and order them as desired. We also introduced the `.pre` and `.post` stages which are predefined stages that let you set certain jobs to always run at the beginning (`.pre`) or end (`.post`) of your pipeline. GitLab CI/CD used stages for the past few years.\n\n## Starting to break out of stage order\n\nLast year we introduced the [`needs`](https://docs.gitlab.com/ee/ci/yaml/#needs) keyword which allows a user to create a Directed Acyclic Graphs (DAG) to speed up the pipeline. 
A job that uses the `needs` keyword creates a dependency between it and one or more different jobs in earlier stages. The job is allowed to start as soon as the earlier jobs finish, skipping the stage order to speed up the pipeline.\n\nIn a sense, you can think of a pipeline that only uses stages as the same as a pipeline that uses `needs` – except every job \"needs\" every job in the previous stage. On the other hand, if jobs in a pipeline *do* use `needs`, they only \"need\" the exact jobs that will allow them to complete successfully. They shouldn't need all the jobs in the previous stage. For example, there's no need for a ruby test job to wait for a javascript linter to complete.\n\n## Stageless pipelines become reality\n\nThe `needs` keyword quickly became popular among our users and helped optimize and accelerate CI/CD pipelines. However it had one limitation: A `needs` dependency could only exist between the jobs in different stages. This limitation was a pain point for our users because they wanted to configure the pipeline based on the `needs` dependencies only and drop the use of stages completely. The importance of adding this functionality became clear because this was one of the most popular [feature requests](https://gitlab.com/gitlab-org/gitlab/-/issues/30632) for GitLab CI/CD.\n\nNow in GitLab 14.2, [you can finally define a whole pipeline using nothing but `needs` to control the execution order](/releases/2021/08/22/gitlab-14-2-released/#stageless-pipelines). No more need to define any stages if you use `needs`!\n\n## Are we getting rid of stages?\n\nNo, we do not have any plans to remove stages from our GitLab CI/CD, and it still works great for those that prefer this workflow.\n\nIn fact if you build a \"stageless\" pipeline, there will still be at least one stage that holds all the jobs. Removing stages was never the goal. 
Our goal is still to support you in building better and faster pipelines, while providing you with the high degree of flexibility you want.\n\nAs always, share any thoughts, comments, or questions, by [opening an issue in GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/new?issue%5Bmilestone_id%5D=) and mentioning me (@dhershkovitch).\n",[721,9],{"slug":1843,"featured":6,"template":701},"stageless-pipelines","content:en-us:blog:stageless-pipelines.yml","Stageless Pipelines","en-us/blog/stageless-pipelines.yml","en-us/blog/stageless-pipelines",{"_path":1849,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1850,"content":1856,"config":1862,"_id":1864,"_type":14,"title":1865,"_source":16,"_file":1866,"_stem":1867,"_extension":19},"/en-us/blog/the-kubecon-summary-from-a-product-perspective",{"title":1851,"description":1852,"ogTitle":1851,"ogDescription":1852,"noIndex":6,"ogImage":1853,"ogUrl":1854,"ogSiteName":686,"ogType":687,"canonicalUrls":1854,"schema":1855},"How what we learned at KubeCon EU 2022 will impact our product roadmaps","Platform integrations and secrets management are among our product team's primary takeaways. 
Find out why.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097776/Blog/Hero%20Images/Blog/Hero%20Images/2_2.png_1750097776369.png","https://about.gitlab.com/blog/the-kubecon-summary-from-a-product-perspective","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How what we learned at KubeCon EU 2022 will impact our product roadmaps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-05-31\",\n      }",{"title":1851,"description":1852,"authors":1857,"heroImage":1853,"date":1859,"body":1860,"category":1103,"tags":1861},[1858],"Viktor Nagy","2022-05-31","\nAfter two years of only virtual KubeCon events, the GitLab product team was excited to participate in and meet colleagues, partners, and more from our industry at KubeCon EU 2022, held in Valencia, Spain. We were present with four product leaders, a software developer, and a UX researcher. This post summarizes our primary takeaways from the conference, an experience that will affect our roadmaps.\n\nWe will discuss the following topics:\n\n- Internal platforms and GitOps\n- Secrets management\n- Infrastructure integrations\n- WebAssembly a.k.a. WASM\n\nThere were 32 topic types and several 0-day events at KubeCon. Many talks focused on a few tools. Many Cloud Native Computing Foundation ([CNCF](https://www.cncf.io/)) projects had their community meetings during these days. Some talks were given IRL, and others were broadcast virtually with live Q&A. There were a variety of topics and approaches. There were many talks about the various aspects of cluster management, too. However, we left this topic out on purpose because at GitLab we want to focus on the software developers and provide one DevOps platform to support their work. Cluster management is one step away from this focus. 
Still, we noticed some remarkable patterns as highlighted by the four elements of our list.\n\n> You’re invited! Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\n## Internal platforms and GitOps\n\nCompanies want their developers to focus on their core business. They create internal platforms to hide the complexity of Day 0-2 operations from their software engineers and still allow the \"shift left\" movement of DevOps. These platforms often involve the welding of several tools.\n\nMany talks presented how the given team or company approached their platform problem and what tools they used, and one could often feel the 18-month sweat of a whole platform team trying to come up with a solution.\n\nThese platforms use either a push- or pull-based model for deployments. No single approach is emerging due to legacy applications and different requirements. While there is a definition of GitOps provided by the [OpenGitOps](https://opengitops.dev/) initiative, several presenters offered their own definitions, including of pull-based deployments.\n\nWe fielded a large-scale survey related to secrets at KubeCon, and learned that users would like help with the [Pipeline Authoring](/direction/verify/pipeline_composition/) workflow.\n\nBesides the wiring of the tools, the industry is still looking for a unified approach to multi-tenancy (there might not be one), and sometimes integrating security processes seems overly challenging.\n\n### How does this affect our roadmap?\n\nThere is a lot of potential in building a platform used as the starting point for internal platforms. Imagine a \"tool\" that shortens the time required to create an internal platform to days or weeks instead of a whole year. 
This is the GitLab vision of The One DevOps platform.\n\nAs a result, we don't plan any changes in our direction. We will continue investing in the recently started [Deployment direction](/direction/delivery/) to provide all the building blocks for a platform in a single tool and are already actively looking for integrated experiences across our offering.\n\nWe’re working on a CI/CD Component Catalog that includes CI templates. This will [support the Pipeline Authoring workflow](https://gitlab.com/groups/gitlab-org/-/epics/7462).\n\n## Secrets management\n\nOne of the things that often came up in our discussions is secrets management. We fielded a large-scale survey related to secrets at KubeCon, and attendees were glad that we’re thinking about this topic. Security is part of the DevOps discussion, and secrets management is a serious issue, especially in a cloud-native aspect.\n\n- Jenkins, GitHub and GitLab were all mentioned during the secret management discussions.\n- Users would like to offload the secrets management responsibility to another product. In many cases, their security requirements are strict, so they don't want/can't handle secrets by themselves.\n- Hashicorp Vault is a preferred tool (primarily in large enterprise companies working in finance or government) to manage and handle secrets. 
At the same time, most companies would like to avoid operating one more application in their stack.\n- Open ID Connect [OIDC](https://docs.gitlab.com/ee/integration/openid_connect_provider.html) with the JSON web token (JWT) is an essential direction for us.\n\n### How does this affect our roadmap?\n\nWe should invest more in secrets management since this is a pain our customers would like us to solve, and it's becoming a nonstarter feature for many organizations.\n\nWe want to advance in three main vectors:\n\n- Improve our existing secrets management solution - although we don't have a clear solution, we should improve our current variables capabilities to include additional features that could help users leverage variables for secrets. So it would be a \"good enough\" feature they can use. We are actively working toward this direction by removing some of the limitations we have around [variables and masking](https://gitlab.com/groups/gitlab-org/-/epics/1994).\n- Improve our existing [Hashicorp Vault integration](https://docs.gitlab.com/ee/ci/examples/authenticating-with-hashicorp-vault/) using the JWT token, allowing us to integrate with additional vendors (AWS, AZURE, GCP). Like the previous point, we are moving toward this direction by supporting OIDC and [adding audience claims to our JWT token](https://gitlab.com/groups/gitlab-org/-/epics/7335).\n- We need to develop [a clear strategy for a built-in secrets management solution](/direction/govern/pipeline_security/secrets_management/#next-9-12-monhts). In order to provide our users/customers with choice, GitLab wants to use Hashicorp Vault for secrets management handling. We believe that our approach should be not to build the logic ourselves but to leverage an open source, [cloud native](/topics/cloud-native/) project that we could build into GitLab.\n\n## Infrastructure integrations\n\nInfrastructure integrations came in several flavors during the talks. 
Some are about cluster management, that is not our focus in this blog. Several presentations show that internal platforms need a strong infrastructure aspect, too. When a new project/microservice is started, it might require a new namespace in the cluster with associated RBAC and policies, optionally storage, a source code management repo with automation, and the appropriate permissions. Deployments might create ephemeral environments or could modify the underlying environment within predefined constraints.\n\nThe top tools mentioned in this area are:\n\n- Terraform\n- Crossplane\n- Pulumi\n\n### How does this affect our roadmap?\n\nGitLab already has [great integrations for Terraform](https://docs.gitlab.com/ee/user/infrastructure/iac/), and the other tools are on our radar, too.\n\nWe are open to integrations but cannot currently prioritize the other integrations on our own. We hope that the community will be interested in contributing to benefit everyone.\n\nBuilding Docker containers might not be necessary to get easy-to-manage container binaries. WASM runtimes become available for Kubernetes, and many programming languages can natively compile to WASM. WASM can provide a secure runtime environment without Docker and might be able to simplify the toolchain developers need to learn.\n\nWe don't plan to add direct WASM support to GitLab yet. The generic package registry can hold WASM modules while their deployment is up to the user.\n\nAt the same time, we see a lot of potential in simple runtime environments built around WASM. While GitLab is not in the business of offering runtime services, we will be actively monitoring the market. 
We might look into more WASM integrations as we see more demand and tools and services maturing in this space.\n\n## GitLab feedback\n\nIt's great to work on a product where the overall sentiment is positive, both from customers that intensely rely on it and from attendees that have to use other tools but would love to use GitLab or just started to play with it recently.\n\nWe received the following notable mentions as feedback:\n\n- Stability and reliability improved over the last several months.\n- Users love our documentation (primarily around CI) - they mentioned it's easy to use and get started with.\n- Given the size of GitLab and the number of our users, we received feedback about long-outstanding issues. We were happy to respond that we are addressing at least some of them shortly.\n- Several customers had asked if we got some resources for migrating from Jenkins to GitLab.\n- A few customers mentioned that they had to move away from GitLab mainly because of an upper-level decision despite favouring GitLab.\n\n## Conclusions\n\n![The GitLab team](https://about.gitlab.com/images/blogimages/kubecon-gitlab-team.jpg)\n\nWe enjoyed all the talks and were delighted to meet and speak with our users and customers. Thanks to all of you, we could \"feel the pulse\" on how we are doing and validate our direction.\n\nWe hope that this blog will guide those who could not [attend KubeCon](https://about.gitlab.com/events/kubecon/) and serve as a summary for those who did attend. All the recordings will likely be available on YouTube from Jun 6, 2022.\n\nLet us know in the comments if you think we missed some important direction.\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality.\nIt is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. 
As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n",[959,721,9,550,1001,765],{"slug":1863,"featured":6,"template":701},"the-kubecon-summary-from-a-product-perspective","content:en-us:blog:the-kubecon-summary-from-a-product-perspective.yml","The Kubecon Summary From A Product Perspective","en-us/blog/the-kubecon-summary-from-a-product-perspective.yml","en-us/blog/the-kubecon-summary-from-a-product-perspective",{"_path":1869,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1870,"content":1876,"config":1882,"_id":1884,"_type":14,"title":1885,"_source":16,"_file":1886,"_stem":1887,"_extension":19},"/en-us/blog/using-child-pipelines-to-continuously-deploy-to-five-environments",{"title":1871,"description":1872,"ogTitle":1871,"ogDescription":1872,"noIndex":6,"ogImage":1873,"ogUrl":1874,"ogSiteName":686,"ogType":687,"canonicalUrls":1874,"schema":1875},"Using child pipelines to continuously deploy to five environments","Learn how to manage continuous deployment to multiple environments, including temporary, on-the-fly sandboxes, with a minimalist GitLab workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097012/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_397632156_3Ldy1urjMStQCl4qnOBvE0_1750097011626.jpg","https://about.gitlab.com/blog/using-child-pipelines-to-continuously-deploy-to-five-environments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using child pipelines to continuously deploy to five environments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Olivier Dupré\"}],\n        \"datePublished\": \"2024-09-26\",\n      }",{"title":1871,"description":1872,"authors":1877,"heroImage":1873,"date":1879,"body":1880,"category":741,"tags":1881},[1878],"Olivier 
Dupré","2024-09-26","DevSecOps teams sometimes require the ability to manage continuous\ndeployment across multiple environments — and they need to do so without\nchanging their workflows. The [GitLab DevSecOps\nplatform](https://about.gitlab.com/) supports this need, including\ntemporary, on-the-fly sandboxes, with a minimalist approach. In this\narticle, you'll learn how to run continuous deployment of infrastructure\nusing Terraform, over multiple environments.\n\n\nThis strategy can easily be applied to any project, whether it is\ninfrastructure as code (IaC) relying on another technology, such as\n[Pulumi](https://www.pulumi.com/) or [Ansible](https://www.ansible.com/),\nsource code in any language, or a monorepo that mixes many languages.\n\n\nThe final pipeline that you will have at the end of this tutorial will\ndeploy:\n\n\n* A temporary **review** environment for each feature branch.\n\n* An **integration** environment, easy to wipe out and deployed from the\nmain branch.\n\n* A **QA** environment, also deployed from the main branch, to run quality\nassurance steps.\n\n* A **staging** environment, deployed for every tag. This is the last round\nbefore production.\n\n* A **production** environment, just after the staging environment. 
This one\nis triggered manually for demonstration, but can also be continuously\ndeployed.\n\n\n>Here is the legend for the flow charts in this article:\n\n> * Round boxes are the GitLab branches.\n\n> * Square boxes are the environments.\n\n> * Text on the arrows are the actions to flow from one box to the next.\n\n> * Angled squares are decision steps.\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n    A(main) -->|new feature| B(feature_X)\n\n    B -->|auto deploy| C[review/feature_X]\n    B -->|merge| D(main)\n    C -->|destroy| D\n\n    D -->|auto deploy| E[integration]\n    E -->|manual| F[qa]\n\n    D -->|tag| G(X.Y.Z)\n    F -->|validate| G\n\n    G -->|auto deploy| H[staging]\n    H -->|manual| I{plan}\n    I -->|manual| J[production]\n\u003C/pre>\n\n\nOn each step, you'll learn the [why](#why) and the [what](#what) before\nmoving to the [how](#how). This will help you fully understand and replicate\nthis tutorial.\n\n\n## Why\n\n\n* [Continuous\nintegration](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-integration-ci)\nis almost a de facto standard. Most companies have implemented CI pipelines\nor are willing to standardize their practice.\n\n\n* [Continuous\ndelivery](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-delivery-cd),\nwhich pushes artifacts to a repository or registry at the end of the CI\npipeline, is also popular.\n\n\n* Continuous deployment, which goes further and deploys these artifacts\nautomatically, is less widespread. When it has been implemented, we see it\nessentially in the application field. When discussing continuously\ndeploying  infrastructure, the picture seems less obvious, and is more about\nmanaging several environments. In contrast, testing, securing, and verifying\nthe infrastructure's code seems more challenging. And this is one of the\nfields where DevOps has not yet reached its maturity. 
One of the other\nfields is to shift security left, integrating security teams and, more\nimportantly, security concerns, earlier in the delivery lifecycle, to\nupgrade from DevOps to ***DevSecOps***.\n\n\nGiven this high-level picture, in this tutorial, you will work toward a\nsimple, yet efficient way to implement DevSecOps for your infrastructure\nthrough the example of deploying resources to five environments, gradually\nprogressing from development to production.\n\n\n__Note:__ Even if I advocate embracing a FinOps approach and reducing the\nnumber of environments, sometimes there are excellent reasons to maintain\nmore than just dev, staging, and production. So, please, adapt the examples\nbelow to match your needs.\n\n\n## What\n\n\nThe rise of cloud technology has driven the usage of IaC. Ansible and\nTerraform were among the first to pave the road here. OpenTofu, Pulumi, AWS\nCDK, Google Deploy Manager, and many others joined the party.\n\n\nDefining IaC is a perfect solution to feel safe when deploying\ninfrastructure. You can test it, deploy it, and replay it again and again\nuntil you reach your goal.\n\n\nUnfortunately, we often see companies maintain several branches, or even\nrepositories, for each of their target environments. And this is where the\nproblems start. They are no longer enforcing a process. They are no longer\nensuring that any change in the production code base has been accurately\ntested in previous environments. And they start seeing drifts from one\nenvironment to the other.\n\n\nI realized this tutorial was necessary when, at a conference I attended,\nevery participant said they do not have a workflow that enforces the\ninfrastructure to be tested thoroughly before being deployed to production.\nAnd they all agreed that sometimes they patch the code directly in\nproduction. Sure, this is fast, but is it safe? How do you report back to\nprevious environments? How do you ensure there are no side effects? 
How do\nyou control whether you are putting your company at risk with new\nvulnerabilities being pushed too quickly in production?\n\n\nThe question of *why* DevOps teams deploy directly to production is critical\nhere. Is it because the pipeline could be more efficient or faster? Is there\nno automation? Or, even worse, because there is *no way to test accurately\noutside of production*?\n\n\nIn the next section, you will learn how to implement automation for your\ninfrastructure and ensure that your DevOps team can effectively test what\nyou are doing before pushing to any environment impacting others. You will\nsee how your code is secured and its deployment is controlled, end-to-end.\n\n\n## How\n\n\nAs mentioned earlier, there are many IaC languages out there nowadays and we\nobjectively cannot cover *all* of them in a single article. So, I will rely\non a basic Terraform code running on Version 1.4. Please do not focus on the\nIaC language itself but instead on the process that you could apply to your\nown ecosystem.\n\n\n### The Terraform code\n\n\nLet's start with a fundamental Terraform code.\n\n\nWe are going to deploy to AWS, a virtual private cloud (VPC), which is a\nvirtual network. In that VPC, we will deploy a public and a private subnet.\nAs their name implies, they are subnets of the main VPC. Finally, we will\nadd an Elastic Cloud Compute (EC2) instance (a virtual machine) in the\npublic subnet.\n\n\nThis demonstrates the deployment of four resources without adding too much\ncomplexity. 
The idea is to focus on the pipeline, not the code.\n\n\nHere is the target we want to reach for your repository.\n\n\n![target for\nrepository](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097033/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097033415.png)\n\n\nLet’s do it step by step.\n\n\nFirst, we declare all resources in a `terraform/main.tf` file:\n\n\n```terraform\n\nprovider \"aws\" {\n  region = var.aws_default_region\n}\n\n\nresource \"aws_vpc\" \"main\" {\n  cidr_block = var.aws_vpc_cidr\n\n  tags = {\n    Name     = var.aws_resources_name\n  }\n}\n\n\nresource \"aws_subnet\" \"public_subnet\" {\n  vpc_id     = aws_vpc.main.id\n  cidr_block = var.aws_public_subnet_cidr\n\n  tags = {\n    Name = \"Public Subnet\"\n  }\n}\n\nresource \"aws_subnet\" \"private_subnet\" {\n  vpc_id     = aws_vpc.main.id\n  cidr_block = var.aws_private_subnet_cidr\n\n  tags = {\n    Name = \"Private Subnet\"\n  }\n}\n\n\nresource \"aws_instance\" \"sandbox\" {\n  ami           = var.aws_ami_id\n  instance_type = var.aws_instance_type\n\n  subnet_id = aws_subnet.public_subnet.id\n\n  tags = {\n    Name     = var.aws_resources_name\n  }\n}\n\n```\n\n\nAs you can see, there are a couple of variables that are needed for this\ncode, so let's declare them in a `terraform/variables.tf` file:\n\n\n```terraform\n\nvariable \"aws_ami_id\" {\n  description = \"The AMI ID of the image being deployed.\"\n  type        = string\n}\n\n\nvariable \"aws_instance_type\" {\n  description = \"The instance type of the VM being deployed.\"\n  type        = string\n  default     = \"t2.micro\"\n}\n\n\nvariable \"aws_vpc_cidr\" {\n  description = \"The CIDR of the VPC.\"\n  type        = string\n  default     = \"10.0.0.0/16\"\n}\n\n\nvariable \"aws_public_subnet_cidr\" {\n  description = \"The CIDR of the public subnet.\"\n  type        = string\n  default     = \"10.0.1.0/24\"\n}\n\n\nvariable \"aws_private_subnet_cidr\" {\n  description = \"The CIDR of the 
private subnet.\"\n  type        = string\n  default     = \"10.0.2.0/24\"\n}\n\n\nvariable \"aws_default_region\" {\n  description = \"Default region where resources are deployed.\"\n  type        = string\n  default     = \"eu-west-3\"\n}\n\n\nvariable \"aws_resources_name\" {\n  description = \"Default name for the resources.\"\n  type        = string\n  default     = \"demo\"\n}\n\n```\n\n\nAlready, we are almost good to go on the IaC side. What's missing is a way\nto share the Terraform states. For those who don't know, Terraform works\nschematically doing the following:\n\n\n* `plan` checks the differences between the current state of the\ninfrastructure and what is defined in the code. Then, it outputs the\ndifferences.\n\n* `apply` applies the differences in the `plan` and updates the state.\n\n\nFirst round, the state is empty, then it is filled with the details (ID,\netc.) of the resources applied by Terraform.\n\n\nThe problem is: Where is that state stored? How do we share it so several\ndevelopers can collaborate on code?\n\n\nThe solution is fairly simple: Leverage GitLab to store and share the state\nfor you through a [Terraform HTTP\nbackend](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html).\n\n\nThe first step in using this backend is to create the most simple\n`terraform/backend.tf` file. The second step will be handled in the\npipeline.\n\n\n```terraform\n\nterraform {\n  backend \"http\" {\n  }\n}\n\n```\n\n\nEt voilà! We have a bare minimum Terraform code to deploy these four\nresources. 
We will provide the variable values at the runtime, so let's do\nthat later.\n\n\n### The workflow\n\n\nThe workflow that we are going to implement now is the following:\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n    A(main) -->|new feature| B(feature_X)\n\n    B -->|auto deploy| C[review/feature_X]\n    B -->|merge| D(main)\n    C -->|destroy| D\n\n    D -->|auto deploy| E[integration]\n    E -->|manual| F[qa]\n\n    D -->|tag| G(X.Y.Z)\n    F -->|validate| G\n\n    G -->|auto deploy| H[staging]\n    H -->|manual| I{plan}\n    I -->|manual| J[production]\n\u003C/pre>\n\n\n1. Create a **feature** branch. This will continuously run all scanners on\nthe code to ensure that it is still compliant and secured. This code will be\ncontinuously deployed to a temporary environment `review/feature_branch`\nwith the name of the current branch. This is a safe environment where the\ndevelopers and operations teams can test their code without impacting\nanybody. This is also where we will enforce the process, like enforcing code\nreviews and running scanners, to ensure that the quality and security of the\ncode are acceptable and do not put your assets at risk. The infrastructure\ndeployed by this branch is automatically destroyed when the branch is\nclosed. This helps you keep your budget under control.\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n    A(main) -->|new feature| B(feature_X)\n\n    B -->|auto deploy| C[review/feature_X]\n    B -->|merge| D(main)\n    C -->|destroy| D\n\u003C/pre>\n\n\n2. Once approved, the feature branch will be **merged** into the main\nbranch. This is a [protected\nbranch](https://docs.gitlab.com/ee/user/project/protected_branches.html)\nwhere no one can push. This is mandatory to ensure that every change request\nto production is thoroughly tested. That branch is also continuously\ndeployed. The target here is the `integration` environment. 
To keep this\nenvironment slightly more stable, its deletion is not automated but can be\ntriggered manually.\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n    D(main) -->|auto deploy| E[integration]\n\u003C/pre>\n\n\n3. From there, manual approval is required to trigger the next deployment.\nThis will deploy the main branch to the `qa` environment. Here, I have set a\nrule to prevent deletion from the pipeline. The idea is that this\nenvironment should be quite stable (after all, it's already the third\nenvironment), and I would like to prevent deletion by mistake. Feel free to\nadapt the rules to match your processes.\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n    D(main)-->|auto deploy| E[integration]\n    E -->|manual| F[qa]\n\u003C/pre>\n\n\n4. To proceed, we will need to **tag** the code. We are relying on\n[protected\ntags](https://docs.gitlab.com/ee/user/project/protected_tags.html) here to\nensure that only a specific set of users are allowed to deploy to these last\ntwo environments. This will immediately trigger a deployment to the\n`staging` environment.\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n    D(main) -->|tag| G(X.Y.Z)\n    F[qa] -->|validate| G\n\n    G -->|auto deploy| H[staging]\n\u003C/pre>\n\n\n5. Finally, we are landing to `production`. When discussing infrastructure,\nit is often challenging to deploy progressively (10%, 25%, etc.), so we will\ndeploy the whole infrastructure. Still, we control that deployment with a\nmanual trigger of this last step. 
And to enforce maximum control on this\nhighly critical environment, we will control it as a [protected\nenvironment](https://docs.gitlab.com/ee/ci/environments/protected_environments.html).\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n    H[staging] -->|manual| I{plan}\n    I -->|manual| J[production]\n\u003C/pre>\n\n\n### The pipeline\n\n\nTo implement the above [workflow](#the-workflow), we are now going to\nimplement a pipeline with two [downstream\npipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html).\n\n\n#### The main pipeline\n\n\nLet's start with the main pipeline. This is the one that will be triggered\nautomatically on any **push to a feature branch**, any **merge to the\ndefault branch**, or any **tag**. *The one* that will do true **continuous\ndeployment** to the following environments: `dev`, `integration`, and\n`staging`. And it is declared in the `.gitlab-ci.yml` file at the root of\nyour project.\n\n\n![the repository\ntarget](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097033/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097033417.png)\n\n\n```yml\n\nstages:\n  - test\n  - environments\n\n.environment:\n  stage: environments\n  variables:\n    TF_ROOT: terraform\n    TF_CLI_ARGS_plan: \"-var-file=../vars/$variables_file.tfvars\"\n  trigger:\n    include: .gitlab-ci/.first-layer.gitlab-ci.yml\n    strategy: depend            # Wait for the triggered pipeline to successfully complete\n    forward:\n      yaml_variables: true      # Forward variables defined in the trigger job\n      pipeline_variables: true  # Forward manual pipeline variables and scheduled pipeline variables\n\nreview:\n  extends: .environment\n  variables:\n    environment: review/$CI_COMMIT_REF_SLUG\n    TF_STATE_NAME: $CI_COMMIT_REF_SLUG\n    variables_file: review\n    TF_VAR_aws_resources_name: $CI_COMMIT_REF_SLUG  # Used in the tag Name of the resources deployed, to easily differentiate them\n  rules:\n    - 
if: $CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n\nintegration:\n  extends: .environment\n  variables:\n    environment: integration\n    TF_STATE_NAME: $environment\n    variables_file: $environment\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\nstaging:\n  extends: .environment\n  variables:\n    environment: staging\n    TF_STATE_NAME: $environment\n    variables_file: $environment\n  rules:\n    - if: $CI_COMMIT_TAG\n\n#### TWEAK\n\n# This tweak is needed to display vulnerability results in the merge\nwidgets.\n\n# As soon as this issue https://gitlab.com/gitlab-org/gitlab/-/issues/439700\nis resolved, the `include` instruction below can be removed.\n\n# Until then, the SAST IaC scanners will run in the downstream pipelines,\nbut their results will not be available directly in the merge request\nwidget, making it harder to track them.\n\n# Note: This workaround is perfectly safe and will not slow down your\npipeline.\n\ninclude:\n  - template: Security/SAST-IaC.gitlab-ci.yml\n#### END TWEAK\n\n\n```\n\n\nThis pipeline runs only two stages: `test` and  `environments`. The former\nis needed for the *TWEAK* to run scanners. The latter triggers a child\npipeline with a different set of variables for each case defined above (push\nto the branch, merge to the default branch, or tag).\n\n\nWe are adding here a dependency with the keyword\n[strategy:depend](https://docs.gitlab.com/ee/ci/yaml/index.html#triggerstrategy)\non our child pipeline so the pipeline view in GitLab will be updated only\nonce the deployment is finished.\n\n\nAs you can see here, we are defining a base job,\n[hidden](https://docs.gitlab.com/ee/ci/jobs/#hide-jobs), and we are\nextending it with specific variables and rules to trigger only one\ndeployment for each target environment.\n\n\nBesides the [predefined\nvariables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html),\nwe are using two new entries that we need to define:\n\n1. 
[The variables specific](#the-variable-definitions) to each environment:\n`../vars/$variables_file.tfvars`\n\n2. [The child pipeline](#the-child-pipeline), defined in\n`.gitlab-ci/.first-layer.gitlab-ci.yml`\n\n\nLet's start with the smallest part, the variable definitions.\n\n\n### The variable definitions\n\n\nWe are going here to mix two solutions to provide variables to Terraform:\n\n\n* The first one using [.tfvars\nfiles](https://developer.hashicorp.com/terraform/language/values/variables#variable-definitions-tfvars-files)\nfor all non-sensitive input, which should be stored within GitLab.\n\n\n![solution one to provide variables to\nTerraform](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097034/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097033419.png)\n\n\n* The second using [environment\nvariables](https://developer.hashicorp.com/terraform/language/values/variables#environment-variables)\nwith the prefix `TF_VAR`. That second way to inject variables, associated\nwith the GitLab capacity to [mask\nvariables](https://docs.gitlab.com/ee/ci/variables/#mask-a-cicd-variable),\n[protect\nthem](https://docs.gitlab.com/ee/ci/variables/#protect-a-cicd-variable), and\n[scope them to\nenvironments](https://docs.gitlab.com/ee/ci/environments/index.html#limit-the-environment-scope-of-a-cicd-variable)\nis a powerful solution to **prevent sensitive information leakages**. 
(If\nyou consider your production’s private CIDR very sensitive, you could\nprotect it like this, ensuring it is only available for the `production`\nenvironment, for pipelines running against protected branches and tags, and\nthat its value is masked in the job’s logs.)\n\n\n![solution two to provide variables to\nTerraform](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097034/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097033422.png)\n\n\nAdditionally, each variable file should be controlled through a\n[`CODEOWNERS` file](https://docs.gitlab.com/ee/user/project/codeowners/) to\nset who can modify each of them.\n\n\n```\n\n[Production owners] \n\nvars/production.tfvars @operations-group\n\n\n[Staging owners]\n\nvars/staging.tfvars @odupre @operations-group\n\n\n[CodeOwners owners]\n\nCODEOWNERS @odupre\n\n```\n\n\nThis article is not a Terraform training, so we will go very fast and simply\nshow here the `vars/review.tfvars` file. Subsequent environment files are,\nof course, very similar. Just set the non-sensitive variables and their\nvalues here.\n\n\n```shell\n\naws_vpc_cidr = \"10.1.0.0/16\"\n\naws_public_subnet_cidr = \"10.1.1.0/24\"\n\naws_private_subnet_cidr = \"10.1.2.0/24\"\n\n```\n\n\n#### The child pipeline\n\n\nThis one is where the actual work is done. So, it is slightly more complex\nthan the first one. But there is no difficulty here that we cannot overcome\ntogether!\n\n\nAs we have seen in the definition of the [main\npipeline](#the-main-pipeline), that downstream pipeline is declared in the\nfile `.gitlab-ci/.first-layer.gitlab-ci.yml`.\n\n\n![Downstream pipeline declared in\nfile](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097033/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097033424.png)\n\n\nLet's break it down into small chunks. 
We'll see the big picture at the end.\n\n\n##### Run Terraform commands and secure the code\n\n\nFirst, we want to run a pipeline for Terraform. We, at GitLab, are open\nsource. So, our Terraform template is open source. And you simply need to\ninclude it. This can be achieved with the following snippet:\n\n\n```yml\n\ninclude:\n  - template: Terraform.gitlab-ci.yml\n```\n\n\nThis template runs for you the Terraform checks on the formatting and\nvalidates your code, before planning and applying it. It also allows you to\ndestroy what you have deployed.\n\n\nAnd, because GitLab is a single, unified DevSecOps platform, we are also\nautomatically including two security scanners within that template to find\npotential threats in your code and warn you before you deploy it to the next\nenvironments.\n\n\nNow that we have checked, secured, built, and deployed our code, let's do\nsome tricks.\n\n\n##### Share cache between jobs\n\n\nWe will cache the job results to reuse them in subsequent pipeline jobs.\nThis is as simple as adding the following piece of code:\n\n\n```yml\n\ndefault:\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: cache-$CI_COMMIT_REF_SLUG\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n```\n\n\nHere, we are defining a different cache for each commit, falling back to the\nmain branch name if needed.\n\n\nIf we look carefully at the templates that we are using, we can see that it\nhas some rules to control when jobs are run. We want to run all controls\n(both QA and security) on all branches. So, we are going to override these\nsettings.\n\n\n##### Run controls on all branches\n\n\nGitLab templates are a powerful feature where one can override only a piece\nof the template. Here, we are interested only in overwriting the rules of\nsome jobs to always run quality and security checks. 
Everything else defined\nfor these jobs will stay as defined in the template.\n\n\n```yml\n\nfmt:\n  rules:\n    - when: always\n\nvalidate:\n  rules:\n    - when: always\n\nkics-iac-sast:\n  rules:\n    - when: always\n\niac-sast:\n  rules:\n    - when: always\n```\n\n\nNow that we have enforced the quality and security controls, we want to\ndifferentiate how the main environments (integration and staging) in the\n[workflow](#the-workflow) and review environments behave. Let's start by\ndefining the main environment’s behavior, and we will tweak this\nconfiguration for the review environments.\n\n\n##### CD to integration and staging\n\n\nAs defined earlier, we want to deploy the main branch and the tags to these\ntwo environments. We are adding rules to control that on both the `build`\nand `deploy` jobs. Then, we want to enable `destroy` only for the\n`integration` as we have defined `staging` to be too critical to be deleted\nwith a single click. This is error-prone and we don't want to do that.\n\n\nFinally, we are linking the `deploy` job to the `destroy` one, so we can\n`stop` the environment directly from GitLab GUI.\n\n\nThe `GIT_STRATEGY` is here to prevent retrieving the code from the source\nbranch in the runner when destroying. This would fail if the branch has been\ndeleted manually, so we are relying on the cache to get everything we need\nto run the Terraform instructions.\n\n\n```yml\n\nbuild:  # terraform plan\n  environment:\n    name: $TF_STATE_NAME\n    action: prepare\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndeploy: # terraform apply --> automatically deploy on corresponding env\n(integration or staging) when merging to default branch or tagging. 
Second\nlayer environments (qa and production) will be controlled manually\n  environment: \n    name: $TF_STATE_NAME\n    action: start\n    on_stop: destroy\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndestroy:\n  extends: .terraform:destroy\n  variables:\n    GIT_STRATEGY: none\n  dependencies:\n    - build\n  environment:\n    name: $TF_STATE_NAME\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $TF_DESTROY == \"true\" # Manually destroy integration env.\n      when: manual\n```\n\n\nAs said, this matches the need to deploy to `integration` and `staging`. But\nwe are still missing a temporary environment where the developers can\nexperience and validate their code without impacts on others. This is where\nthe deployment to the `review` environment takes place.\n\n\n##### CD to review environments\n\n\nDeploying to review environment is not too different than deploying to\n`integration` and `staging`. So we will once again leverage GitLab's\ncapacity to overwrite only pieces of job definition here.\n\n\nFirst, we set rules to run these jobs only on feature branches.\n\n\nThen, we link the `deploy_review` job to `destroy_review`. This will allow\nus to stop the environment **manually** from the GitLab user interface, but\nmore importantly, it will **automatically trigger the environment\ndestruction** when the feature branch is closed. 
This is a good FinOps\npractice to help you control your operational expenditures.\n\n\nSince Terraform needs a plan file to destroy an infrastructure, exactly like\nit needs one to build an infrastructure, then we are adding a dependency\nfrom `destroy_review` to `build_review`, to retrieve its artifacts.\n\n\nFinally, we see here that the environment's name is set to `$environment`.\nIt has been set in the [main pipeline](#the-main-pipeline) to\n`review/$CI_COMMIT_REF_SLUG`, and forwarded to this child pipeline with the\ninstruction `trigger:forward:yaml_variables:true`.\n\n\n```yml\n\nbuild_review:\n  extends: build\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndeploy_review:\n  extends: deploy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: start\n    on_stop: destroy_review\n    # url: https://$CI_ENVIRONMENT_SLUG.example.com\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndestroy_review:\n  extends: destroy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH   # Do not destroy staging\n      when: never\n    - when: manual\n```\n\n\nSo, to recap, we now have a pipeline that can:\n\n\n* Deploy temporary review environments, which are automatically cleaned up\nwhen the feature branch is closed\n\n* Continuously deploy the **default branch** to `integration`\n\n* Continuously deploy the **tags** to `staging`\n\n\nLet's now add an extra layer, where we will deploy, based on a manual\ntrigger this time, to `qa` and `production` environments.\n\n\n##### Continously deploy to QA and production\n\n\nBecause not everybody is willing to deploy continuously to production, we\nwill add 
a manual validation to the next two deployments. From a purely\n**CD** perspective, we would not add this trigger, but take this as an\nopportunity to learn how to run jobs from other triggers.\n\n\nSo far, we have started a [child pipeline](#the-child-pipeline) from the\n[main pipeline](#the-main-pipeline) to run all deployments.\n\n\nSince we want to run other deployments from the default branch and the tags,\nwe will add another layer dedicated to these additional steps. Nothing new\nhere. We will just repeat exactly the same process as the one we only did\nfor the [main pipeline](#the-main-pipeline). Going this way allows you to\nmanipulate as many layers as you need. I have already seen up to nine\nenvironments in some places.\n\n\nWithout arguing once again on the benefits to have fewer environments, the\nprocess that we are using here makes it very easy to implement the same\npipeline all the way from early stages to final delivery, while keeping your\npipeline definition simple and split in small chunks that you can maintain\nat no cost.\n\n\nTo prevent variable conflicts here, we are just using new var names to\nidentify the Terraform state and input file.\n\n\n```yml\n\n.2nd_layer:\n  stage: 2nd_layer\n  variables:\n    TF_ROOT: terraform\n  trigger:\n    include: .gitlab-ci/.second-layer.gitlab-ci.yml\n    # strategy: depend            # Do NOT wait for the downstream pipeline to finish to mark upstream pipeline as successful. 
Otherwise, all pipelines will fail when reaching the pipeline timeout before deployment to 2nd layer.\n    forward:\n      yaml_variables: true      # Forward variables defined in the trigger job\n      pipeline_variables: true  # Forward manual pipeline variables and scheduled pipeline variables\n\nqa:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: qa\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\nproduction:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: production\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_TAG\n```\n\n\n**One important trick here is the strategy used for the new downstream\npipeline.** We leave that `trigger:strategy` to its default value;\notherwise, the [main pipeline](#the-main-pipeline) would wait for your\n[grand-child pipeline](#the-grand-child-pipeline) to finish. With a manual\ntrigger, this could last for a very long time and make your pipeline\ndashboard harder to read and understand.\n\n\nYou have probably already wondered what is the content of that\n`.gitlab-ci/.second-layer.gitlab-ci.yml` file we are including here.  
We\nwill cover that in the next section.\n\n\n##### The first layer complete pipeline definition\n\n\nIf you are looking for a complete view of this first layer (stored in\n`.gitlab-ci/.first-layer.gitlab-ci.yml`), just expand the section below.\n\n\n```yml\n\nvariables:\n  TF_VAR_aws_ami_id: $AWS_AMI_ID\n  TF_VAR_aws_instance_type: $AWS_INSTANCE_TYPE\n  TF_VAR_aws_default_region: $AWS_DEFAULT_REGION\n\ninclude:\n  - template: Terraform.gitlab-ci.yml\n\ndefault:\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: cache-$CI_COMMIT_REF_SLUG\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n\nstages:\n  - validate\n  - test\n  - build\n  - deploy\n  - cleanup\n  - 2nd_layer       # Use to deploy a 2nd environment on both the main branch and on the tags\n\nfmt:\n  rules:\n    - when: always\n\nvalidate:\n  rules:\n    - when: always\n\nkics-iac-sast:\n  rules:\n    - if: $SAST_DISABLED == 'true' || $SAST_DISABLED == '1'\n      when: never\n    - if: $SAST_EXCLUDED_ANALYZERS =~ /kics/\n      when: never\n    - when: on_success\n\niac-sast:\n  rules:\n    - if: $SAST_DISABLED == 'true' || $SAST_DISABLED == '1'\n      when: never\n    - if: $SAST_EXCLUDED_ANALYZERS =~ /kics/\n      when: never\n    - when: on_success\n\n###########################################################################################################\n\n## Integration env. and Staging. env\n\n##  * Auto-deploy to Integration on merge to main.\n\n##  * Auto-deploy to Staging on tag.\n\n##  * Integration can be manually destroyed if TF_DESTROY is set to true.\n\n##  * Destroy of next env. 
is not automated to prevent errors.\n\n###########################################################################################################\n\nbuild:  # terraform plan\n  environment:\n    name: $TF_STATE_NAME\n    action: prepare\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndeploy: # terraform apply --> automatically deploy on corresponding env\n(integration or staging) when merging to default branch or tagging. Second\nlayer environments (qa and production) will be controlled manually\n  environment: \n    name: $TF_STATE_NAME\n    action: start\n    on_stop: destroy\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndestroy:\n  extends: .terraform:destroy\n  variables:\n    GIT_STRATEGY: none\n  dependencies:\n    - build\n  environment:\n    name: $TF_STATE_NAME\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $TF_DESTROY == \"true\" # Manually destroy integration env.\n      when: manual\n###########################################################################################################\n\n\n###########################################################################################################\n\n## Dev env.\n\n##  * Temporary environment. 
Lives and dies with the Merge Request.\n\n##  * Auto-deploy on push to feature branch.\n\n##  * Auto-destroy on when Merge Request is closed.\n\n###########################################################################################################\n\nbuild_review:\n  extends: build\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndeploy_review:\n  extends: deploy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: start\n    on_stop: destroy_review\n    # url: https://$CI_ENVIRONMENT_SLUG.example.com\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndestroy_review:\n  extends: destroy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH   # Do not destroy staging\n      when: never\n    - when: manual\n###########################################################################################################\n\n\n###########################################################################################################\n\n## Second layer\n\n##  * Deploys from main branch to qa env.\n\n##  * Deploys from tag to production.\n\n###########################################################################################################\n\n.2nd_layer:\n  stage: 2nd_layer\n  variables:\n    TF_ROOT: terraform\n  trigger:\n    include: .gitlab-ci/.second-layer.gitlab-ci.yml\n    # strategy: depend            # Do NOT wait for the downstream pipeline to finish to mark upstream pipeline as successful. 
Otherwise, all pipelines will fail when reaching the pipeline timeout before deployment to 2nd layer.\n    forward:\n      yaml_variables: true      # Forward variables defined in the trigger job\n      pipeline_variables: true  # Forward manual pipeline variables and scheduled pipeline variables\n\nqa:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: qa\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\nproduction:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: production\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_TAG\n###########################################################################################################\n\n```\n\n\nAt this stage, we are already deploying safely to three environments. That\nis my personal ideal recommendation. However, if you need more environments,\nadd them to your CD pipeline.\n\n\nYou have certainly already noted that we include a downstream pipeline with\nthe keyword `trigger:include`. This includes the file\n`.gitlab-ci/.second-layer.gitlab-ci.yml`. We want to run almost the same\npipeline so obviously, its content is very similar to the one we have\ndetailed above. The main advantage here to define this [grand-child\npipeline](#the-grand-child-pipeline) is that it lives on its own, making\nboth variables and rules way easier to define.\n\n\n### The grand-child pipeline\n\n\nThis second layer pipeline is a brand new pipeline. Hence, it needs to mimic\nthe first layer definition with:\n\n\n* [Inclusion of the Terraform\ntemplate](#run-terraform-commands-and-secure-the-code).\n\n* [Enforcement of security checks](#run-controls-on-all-branches). 
Terraform\nvalidation would be duplicates of the first layer, but security scanners may\nfind threats that did not yet exist when scanners previously ran (for\nexample, if you deploy to production a couple of days after your deployment\nto staging).\n\n* [Overwrite build and deploy jobs to set specific\nrules](#cd-to-review-environments). Note that the `destroy` stage is no\nlonger automated to prevent too fast deletions.\n\n\nAs explained above, the `TF_STATE_NAME` and `TF_CLI_ARGS_plan` have been\nprovided from the [main pipeline](#the-main-pipeline) to the [child\npipeline](#the-child-pipeline). We needed another variable name to pass\nthese values from the [child pipeline](#the-child-pipeline) to here, the\n[grand-child pipeline](#the-grand-child-pipeline). This is why they are\npostfixed with `_2` in the child pipeline and the value is copied back to\nthe appropriate variable during the `before_script` here.\n\n\nSince we have already broken down each step above, we can zoom out here\ndirectly to the broad view of the global second layer definition (stored in\n`.gitlab-ci/.second-layer.gitlab-ci.yml`).\n\n\n```yml\n\n# Use to deploy a second environment on both the default branch and the\ntags.\n\n\ninclude:\n  template: Terraform.gitlab-ci.yml\n\nstages:\n  - validate\n  - test\n  - build\n  - deploy\n\nfmt:\n  rules:\n    - when: never\n\nvalidate:\n  rules:\n    - when: never\n\nkics-iac-sast:\n  rules:\n    - if: $SAST_DISABLED == 'true' || $SAST_DISABLED == '1'\n      when: never\n    - if: $SAST_EXCLUDED_ANALYZERS =~ /kics/\n      when: never\n    - when: always\n\n###########################################################################################################\n\n## QA env. and Prod. env\n\n##  * Manually trigger build and auto-deploy in QA\n\n##  * Manually trigger both build and deploy in Production\n\n##  * Destroy of these env. 
is not automated to prevent errors.\n\n###########################################################################################################\n\nbuild:  # terraform plan\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: $TF_STATE_NAME_2\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n  environment:\n    name: $TF_STATE_NAME_2\n    action: prepare\n  before_script:  # Hack to set new variable values on the second layer, while still using the same variable names. Otherwise, due to variable precedence order, setting new value in the trigger job, does not cascade these new values to the downstream pipeline\n    - TF_STATE_NAME=$TF_STATE_NAME_2\n    - TF_CLI_ARGS_plan=$TF_CLI_ARGS_plan_2\n  rules:\n    - when: manual\n\ndeploy: # terraform apply\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: $TF_STATE_NAME_2\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n  environment: \n    name: $TF_STATE_NAME_2\n    action: start\n  before_script:  # Hack to set new variable values on the second layer, while still using the same variable names. Otherwise, due to variable precedence order, setting new value in the trigger job, does not cascade these new values to the downstream pipeline\n    - TF_STATE_NAME=$TF_STATE_NAME_2\n    - TF_CLI_ARGS_plan=$TF_CLI_ARGS_plan_2\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG && $TF_AUTO_DEPLOY == \"true\"\n    - if: $CI_COMMIT_TAG\n      when: manual\n###########################################################################################################\n\n```\n\n\nEt voilà. 
**We are ready to go.** Feel free to change the way you control\nyour job executions, leveraging for example GitLab's capacity to [delay a\njob](https://docs.gitlab.com/ee/ci/jobs/job_control.html#run-a-job-after-a-delay)\nbefore deploying to production.\n\n\n## Try it yourself\n\n\nWe finally reached our destination. We are now able to control **deployments\nto five different environments**, with only the **feature branches**, the\n**main branch**, and **tags**.\n\n* We are intensively reusing GitLab open source templates to ensure\nefficiency and security in our pipelines.\n\n* We are leveraging GitLab template capacities to overwrite only the blocks\nthat need custom control.\n\n* We have split the pipeline in small chunks, controlling the downstream\npipelines to match exactly what we need.\n\n\nFrom there, the floor is yours. You could, for example, easily update the\nmain pipeline to trigger downstream pipelines for your software source code,\nwith the\n[trigger:rules:changes](https://docs.gitlab.com/ee/ci/yaml/#ruleschanges)\nkeyword. And use another\n[template](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates/)\ndepending on the changes that happened. 
But that is another story.\n",[109,721,9,496,766],{"slug":1883,"featured":6,"template":701},"using-child-pipelines-to-continuously-deploy-to-five-environments","content:en-us:blog:using-child-pipelines-to-continuously-deploy-to-five-environments.yml","Using Child Pipelines To Continuously Deploy To Five Environments","en-us/blog/using-child-pipelines-to-continuously-deploy-to-five-environments.yml","en-us/blog/using-child-pipelines-to-continuously-deploy-to-five-environments",{"_path":1889,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1890,"content":1896,"config":1903,"_id":1905,"_type":14,"title":1906,"_source":16,"_file":1907,"_stem":1908,"_extension":19},"/en-us/blog/using-run-parallel-jobs",{"title":1891,"description":1892,"ogTitle":1891,"ogDescription":1892,"noIndex":6,"ogImage":1893,"ogUrl":1894,"ogSiteName":686,"ogType":687,"canonicalUrls":1894,"schema":1895},"How we used parallel CI/CD jobs to increase our productivity","GitLab uses parallel jobs to help long-running jobs run faster.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666717/Blog/Hero%20Images/cover-image.jpg","https://about.gitlab.com/blog/using-run-parallel-jobs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we used parallel CI/CD jobs to increase our productivity\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Miguel Rincon\"}],\n        \"datePublished\": \"2021-01-20\",\n      }",{"title":1891,"description":1892,"authors":1897,"heroImage":1893,"date":1899,"body":1900,"category":741,"tags":1901},[1898],"Miguel Rincon","2021-01-20","At GitLab, we must verify simultaneous changes from the hundreds of people\nthat contribute to GitLab each day. How can we help them contribute\nefficiently using our pipelines?\n\n\nThe pipelines that we use to build and verify GitLab have more than 90 jobs.\nNot all of those jobs are equal. 
Some are simple tasks that take a few\nseconds to finish, while others are long-running processes that must be\noptimized carefully.\n\n\nAt the time of this writing, we have more than 700 [pipelines\nrunning](https://gitlab.com/gitlab-org/gitlab/-/pipelines?page=1&scope=all&status=running).\nEach of these pipelines represent changes from team members and contributors\nfrom the wider community. All GitLab contributors must wait for the\npipelines to finish to make sure the change works and integrates with the\nrest of the product. We want our pipelines to finish as fast as possible to\nmaintain the productivity of our teams.\n\n\nThis is why we constantly monitor the duration of our pipelines. For\nexample, in December 2020, successful merge request pipelines had a duration\nof [53.8\nminutes](/handbook/engineering/quality/performance-indicators/#average-merge-request-pipeline-duration-for-gitlab):\n\n\n![Average pipeline duration was 53.8 minutes in\nDecember](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/historical-pipeline-duration.png){:\n.shadow.medium.center}\n\nThe average pipeline took 53.8 minutes to finish in December.\n\n{: .note.text-center}\n\n\nGiven that we run [around 500 merge request\npipelines](https://gitlab.com/gitlab-org/gitlab/-/pipelines/charts) per day,\nwe want to know: Can we optimize our process to change how long-running jobs\n_run_?\n\n\n## How we fixed our bottleneck jobs by making them run in parallel\n\n\nThe `frontend-fixtures` job uses `rspec` to generate mock data files, which\nare then saved as files called \"fixtures\". 
These files are loaded by our\nfrontend tests, so the `frontend-fixtures` must finish before any of our\nfrontend tests can start.\n\n\n> As not all of our tests need these frontend fixtures, many jobs use the\n[`needs` keyword](https://docs.gitlab.com/ee/ci/yaml/#needs) to start before\nthe `frontend-fixtures` job is done.\n\n\nIn our pipelines, this job looked like this:\n\n\n![The `frontend-fixtures`\njob](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/fixtures-job.png){:\n.shadow.medium.center}\n\nInside the frontend fixtures job.\n\n{: .note.text-center}\n\n\n\nThis job had a normal duration of 20 minutes, and each individual fixture\ncould be generated independently, so we knew there was an opportunity to run\nthis process in parallel.\n\n\nThe next step was to configure our pipeline to split the job into multiple\nbatches that could be run in parallel.\n\n\n## How to make frontend-fixtures a parallel job\n\n\nFortunately, GitLab CI provides an easy way to run a job in parallel using\nthe [`parallel` keyword](https://docs.gitlab.com/ee/ci/yaml/#parallel). In\nthe background, this creates \"clones\" of the same job, so that multiple\ncopies of it can run simultaneously.\n\n\n**Before:**\n\n\n```yml\n\nfrontend-fixtures:\n  extends:\n    - .frontend-fixtures-base\n    - .frontend:rules:default-frontend-jobs\n```\n\n\n**After:**\n\n\n```yml\n\nrspec-ee frontend_fixture:\n  extends:\n    - .frontend-fixtures-base\n    - .frontend:rules:default-frontend-jobs\n  parallel: 2\n```\n\n\nYou will notice two changes. 
First, we changed the name of the job, so our\njob is picked up by [Knapsack](https://docs.knapsackpro.com/ruby/knapsack)\n(more on that later), and then we add the keyword `parallel`, so the job\ngets duplicated and runs in parallel.\n\n\nThe new jobs that are generated look like this:\n\n\n![Our fixtures job running in\nparallel](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/fixtures-job-parallel.png){:\n.shadow.medium.center}\n\nThe new jobs that are picked up by Knapsack and run in parallel.\n\n{: .note.text-center}\n\n\nAs we used a value of `parallel: 2`, actually two jobs are generated with\nthe names:\n\n\n- `rspec-ee frontend_fixture 1/2`\n\n- `rspec-ee frontend_fixture 2/2`\n\n\nOur two \"generated\" jobs, now take three and 17 minutes respectively, giving\nus an overall decrease of about three minutes.\n\n\n![Two parallel jobs in the\npipeline](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/fixtures-job-detail.png){:\n.shadow.medium.center}\n\nThe parallel jobs that are running in the pipeline.\n\n{: .note.text-center}\n\n\n## Another way we optimized the process\n\n\nAs we use Knapsack to distribute the test files among the parallel jobs, we\nwere able to make more improvements by reducing the time it takes our\nlongest-running fixtures-generator file to run.\n\n\nWe did this by splitting the file into smaller batches and optimizing it, so\nwe have more tests running in parallel, which shaved off an additional [~3.5\nminutes](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47158#note_460372560).\n\n\n## Tips for running parallel jobs\n\n\nIf you want to ramp up your productivity you can leverage `parallel` on your\npipelines by following these tips:\n\n\n1. Measure the time your pipelines take to run and identify possible\nbottlenecks to your jobs. You can do this by checking which jobs are slower\nthan others.\n\n1. 
Once your slow jobs are identified, try to figure out if they can be run\nindependently from each other or in batches.\n   - Automated tests are usually good candidates, as they tend to be self-contained and run in parallel anyway.\n1. Add the `parallel` keyword, while measuring the outcome over the next few\nrunning pipelines.\n\n\n## Learn more about our solution\n\n\nWe discuss how running jobs in parallel improved the speed of pipelines on\nGitLab Unfiltered.\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/hKsVH_ZhSAk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\nAnd here are links to some of the resources we used to run pipelines in\nparallel:\n\n\n- The [merge request that introduced `parallel` to\nfixtures](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/46959).\n\n- An important [optimization\nfollow-up](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47158) to\nmake one of the slow tests faster.\n\n- The [Knapsack gem](https://docs.knapsackpro.com/ruby/knapsack), which we\nleverage to split the tests more evenly in multiple CI nodes.\n\n\nAnd many thanks to [Rémy Coutable](/company/team/#rymai), who helped me\nimplement this improvement.\n\n\nCover image by [@dustt](https://unsplash.com/@dustt) on\n[Unsplash](https://unsplash.com/photos/ZqBNb7xK5s8)\n\n{: .note}\n",[853,721,9,743,1902],"workflow",{"slug":1904,"featured":6,"template":701},"using-run-parallel-jobs","content:en-us:blog:using-run-parallel-jobs.yml","Using Run Parallel 
Jobs","en-us/blog/using-run-parallel-jobs.yml","en-us/blog/using-run-parallel-jobs",{"_path":1910,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1911,"content":1917,"config":1922,"_id":1924,"_type":14,"title":1925,"_source":16,"_file":1926,"_stem":1927,"_extension":19},"/en-us/blog/velocity-with-confidence",{"title":1912,"description":1913,"ogTitle":1912,"ogDescription":1913,"noIndex":6,"ogImage":1914,"ogUrl":1915,"ogSiteName":686,"ogType":687,"canonicalUrls":1915,"schema":1916},"How GitLab 14 satisfies the need for speed with modern DevOps","GitLab 14: Ship with velocity, ship with confidence","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682089/Blog/Hero%20Images/racecar_devops.jpg","https://about.gitlab.com/blog/velocity-with-confidence","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab 14 satisfies the need for speed with modern DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Parker Ennis\"}],\n        \"datePublished\": \"2021-07-29\",\n      }",{"title":1912,"description":1913,"authors":1918,"heroImage":1914,"date":1919,"body":1920,"category":741,"tags":1921},[1122],"2021-07-29","\n\n## How DevOps and NFS changed the game\n\nWhat if I told you that one of the best-selling racing video game franchises of all time, the \"Need For Speed\" (NFS), and DevOps have more in common with each other than you think? Yes, you read that correctly, probably not the NFS (Network File System) you were expecting.\n\n### An appetite for change\n\nFor context, the NFS series originally set out to redefine a saturated, yet unsophisticated, racing video game market. Motivated by an appetite for change, the NFS user experience reflected the human connection to real cars and how they behaved, which was a big challenge for developers in the 1990s. 
Nearly 30 years ago, \"The Need for Speed\" forever changed the landscape of racing games, selling 150 million copies since its debut.\n\n![The original Need For Speed game from 1994](https://about.gitlab.com/images/blogimages/need_for_speed.png){: .shadow.center}\nThe original Need For Speed video game set a new standard with an appetite for industry change.\n{: .note.text-center}\n\nCoincidentally, it was in 1994 that Grady Booch coined the term \"continuous integration\" (CI). Booch, like NFS, paved the way for immense industry growth in the realm of software development. CI aimed to redefine the manual, time-consuming development processes that paid little mind to how real humans and developers behaved and collaborated around application development by [leveraging automation to increase development speed without sacrificing quality](/topics/ci-cd/benefits-continuous-integration/).\n\nSimilar to how NFS took the racing scene by storm and laid the groundwork for the racing game genre, CI evolved into what is arguably the most important piece of DevOps best practices today: Continuous integration and continuous delivery (CI/CD).\n\nDevOps continues to evolve, but without CI/CD, DevOps isn't the collaborative practice that helps teams work faster and more efficiently. CI/CD is a super power within DevOps – unlocking the potential to ship apps with increased velocity and confidence in their quality, without having to choose one or the other.\n\n### DIY DevOps vs Modern DevOps\n\nToday, it doesn't matter what your business does, it's going to involve some amount of using and building software. DevOps gained traction in the age of digital transformation, where the rate of technical innovation acted as a forcing function for companies to fail or survive. 
Over the past 10 years or so, organizations had a choice to either embrace this \"need for speed\" and adopt DevOps practices, or be displaced by their competition.\n\nThis scramble led to a \"DIY\" style of DevOps that couldn't deliver on its promises much of the time. For many organizations, the biggest problem wasn't just the brittle toolchains composed of disparate pieces of software but also trying to make these complicated toolchains and processes benefit from DevOps. Since uprooting everything wasn't an option, the root of the problem was still there, and DevOps was hard to adopt.\n\nFor all the teams DevOps has helped, the DevOps marketplace must continuously improve and evolve as we learn more about the challenges of modernizing workflows. DevOps must modernize alongside businesses to ensure it's an accessible and realistic framework for as many companies as possible to leverage.\n\n### GitLab 14 fuels the modern DevOps need for speed\n\nWith a platform-driven approach, [GitLab 14](/releases/2021/06/22/gitlab-14-0-released/) delivers a consistent and efficient developer and operator experience that leads to a simplified and more predictable SDLC. A single user interface, embedded security, and a unified data store are just some of the features of a platform any company can use without the tradeoffs of the DIY DevOps past. By using one tool for source code management, CI, and CD, teams are more efficient and productive with streamlined collaboration. Engineers are happier when focused on value-add than when maintaining integrations – and happy developers help attract and retain talent.\n\n[GitLab 14](/gitlab-14/) ushers in a new era of modern DevOps as a global movement, and I'm excited to talk a little bit about some of its capabilities that help you ship software faster, with a higher degree of confidence, and improve your ability to respond to market changes.\n\n### Ship with velocity and confidence\n\n**1. 
[GitLab pipeline editor](/releases/2021/01/22/gitlab-13-8-released/#pipeline-editor)**\n\nCrafting pipelines can be complicated and verbose without an understanding of advanced pipeline syntax and how it fits within the workflow using the '.gitlab-ci.yml' configuration file. Needing to craft pipelines from scratch presents a steeper learning curve for organizations and teams with a less mature DevOps culture. The GitLab pipeline editor lowers the barrier to entry for CI/CD novices and accelerates power users with visual authoring and versioning, continuous validation, and pipeline visualization. Whether you're a more advanced user or novice, the pipeline editor unlocks additional power and usability.\n\n![Pipeline editor linting capability makes pipeline authoring easier](https://about.gitlab.com/images/blogimages/lint_ci.png){: .shadow.center}\nPipeline editor linting capability makes pipeline authoring easier and more efficient.\n{: .note.text-center}\n\nHere's what some of our wider community is saying about the pipeline editor:\n\n> \"I really like the direction of making CI/CD more accessible to first-time users and how GitLab rolls out this feature piece by piece.\" - Bernhard Knasmüller, computer scientist\n\n> \"This is going to improve the CI/CD configuration experience greatly!\" - Olivier Jourdan, developer\n\n**2. [GitLab Agent for Kubernetes](https://youtu.be/17O_ARVaRGo)**\n\nThe GitLab Agent for Kubernetes enables secure, cloud-native [GitOps](/solutions/gitops/). GitLab also meets customers where they are by supporting GitOps with agent-based and agentless approaches, and for deployments anywhere, regardless of whether infrastructure is cloud-native. It also enables alerts based on network policies for pull-based deployments.\n\nHere's piece of feedback from the wider GitLab community on the Kubernetes Agent:\n\n> \"GitLab is leading the evolution of DevOps by optimising work efficiency and cloud-native integration capabilities. 
This enables the rapid delivery of digital value.\" - Vasanth Kandaswamy, Head of Data and Applications Portfolio, Fujitsu Australia\n\nWe look forward to iterating and improving these capabilities and always [welcome your feedback](/submit-feedback/#product-feedback) on our product.\n\n### What's next?\n\nOne thing is for sure: **people want to go fast,** but not when it requires sacrificing peace of mind and quality. We're committed to helping you ship with velocity and confidence by [investing in specific product areas](/direction/#fy22-product-investment-themes) to bring the benefits of modern DevOps to anyone using GitLab to deliver their applications.\n\n![Go fast with confidence](https://about.gitlab.com/images/blogimages/gofast.gif){: .shadow.center}\nEven Ricky Bobby from Talledega Nights agrees. People just want to go fast!\n{: .note.text-center}\n\nWe'll continue executing on our [vision for CI/CD](https://gitlab.com/groups/gitlab-org/-/epics/4534) to create a visual pipeline authoring experience built right into GitLab that simplifies the complexity, letting you quickly create and edit pipelines while still exposing advanced options when you need them.\n\nWe're also committed to making sure you can deploy anytime and anywhere to take advantage of the benefits of Kubernetes, no matter where you are at on your cloud native development journey. If you have feedback or suggestions on what we can do better, please [let us know in our product epic.](https://gitlab.com/groups/gitlab-org/-/epics/3329)\n\nWe look forward to delivering you more value as we iterate upon this new era of GitLab 14 going foward and can't wait to see the great things you're creating with Gitlab.\n\n_This blog is part three in a three-part series on the top capabilities of GitLab 14. 
Learn more about [how GitLab 14 prepares you for DevSecOps 2.0 in part one](/blog/are-you-ready-for-the-newest-era-of-devsecops/), and about [how to optimize DevOps with GitLab 14's enhanced visibility tools in part two](/blog/optimizing-devops-visibility-in-gitlab-14/)._\n\nCover image by [CHUTTERSNAP](https://unsplash.com/@chuttersnapk) on [Unsplash](https://unsplash.com/photos/5Yo1P9ErikM)\n{: .note}\n",[765,853,721,9,550],{"slug":1923,"featured":6,"template":701},"velocity-with-confidence","content:en-us:blog:velocity-with-confidence.yml","Velocity With Confidence","en-us/blog/velocity-with-confidence.yml","en-us/blog/velocity-with-confidence",{"_path":1929,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1930,"content":1936,"config":1944,"_id":1946,"_type":14,"title":1947,"_source":16,"_file":1948,"_stem":1949,"_extension":19},"/en-us/blog/vestiaire-collective-on-moving-to-a-devsecops-platform",{"title":1931,"description":1932,"ogTitle":1931,"ogDescription":1932,"noIndex":6,"ogImage":1933,"ogUrl":1934,"ogSiteName":686,"ogType":687,"canonicalUrls":1934,"schema":1935},"Vestiaire Collective's DevSecOps migration: Wins and insights","Support for container registries and integrations with existing tools were the top reasons for the ecommerce company's migration to GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670278/Blog/Hero%20Images/fasttrack.jpg","https://about.gitlab.com/blog/vestiaire-collective-on-moving-to-a-devsecops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Vestiaire Collective VP shares wins, insights, and what's next with DevSecOps migration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chandler Gibbons\"}],\n        \"datePublished\": \"2023-01-05\",\n      }",{"title":1937,"description":1932,"authors":1938,"heroImage":1933,"date":1940,"body":1941,"category":1942,"tags":1943},"Vestiaire Collective VP shares 
wins, insights, and what's next with DevSecOps migration",[1939],"Chandler Gibbons","2023-01-05","\n[Vestiaire Collective](https://us.vestiairecollective.com/), an online marketplace for second-hand clothing and luxury items, needed a faster and easier-to-use solution for code reviews and running pipelines. In 2018, the company migrated its codebase to GitLab for its speed and flexibility in setting up custom workflows and pipelines for releases. Since making the move, Vestiaire Collective has taken advantage of GitLab’s integrations with other tools — including [Jenkins for CI/CD](https://docs.gitlab.com/ee/integration/jenkins.html), [Jira](https://docs.gitlab.com/ee/integration/jira/) for issue management, and Nexus artifact storage — to improve productivity and simplify complex toolchains. We talked to Sardorbek Pulatov, vice president of engineering at Vestiaire Collective, about what his team has been able to achieve with the GitLab DevSecOps Platform and the lessons learned along the way.\n\n**What were the challenges that led Vestiaire Collective to explore GitLab?**\n\nWhen Vestiaire Collective started with GitLab back in 2018, we wanted to have a fast and in-house version control system with features such as running pipelines. One of the biggest chunks of our code base, the monolith, was on Subversion. We migrated to GitLab for speed and also the better maintainability, and code reviews being much easier. GitLab has also enabled us to set up workflows and pipelines for our releases. And recently we also created our own tool for releases because we have a custom workflow in Jira.\n\nNow we have not just engineers in GitLab, but also data engineers and data scientists. So, for example, data scientists manage their releases through their repositories in GitLab. They’re actually quite advanced in using GitLab, the data scientist teams. 
So they use everything new released by GitLab.\n\n**Since moving to a single platform for DevSecOps, what are the biggest benefits you’ve noticed? How has GitLab helped Vestiaire Collective simplify complicated toolchains?**\n\nWhen GitLab released support for container registries and npm, it was such a relief for us because we were using Amazon Elastic Container Registry (ECR) and it was slow because it was in a different location — we deploy in Ireland but our team is spread across Europe and the United States. We also tried to use our own setup with Nexus and support it ourselves, meaning if there was a vulnerability we would need to update it and maintain it separately. Even if that’s only required once every six months, it still takes time. You still need to plan the upgrade. But with GitLab, our problem was solved. Now developers have [a registry for containers inside GitLab](https://docs.gitlab.com/ee/user/packages/container_registry/) so they can easily push new releases of their services.\n\nThe fact that GitLab integrates with the other tools we are using has also been a huge benefit. We use Jira for project management, and thanks to GitLab’s Jira integration, whenever a developer pushes a commit in GitLab it’s fully visible in Jira. And now, with our custom integration, the releases are also synced, so when you create a release in GitLab, it creates a release with the same ticket in Jira.\n\nAs a next step, personally, I would love us to be able to migrate entirely into GitLab for project management, using GitLab [issues](https://docs.gitlab.com/ee/user/project/issues/index.html) and [epics](https://docs.gitlab.com/ee/user/group/epics/). We’re not there yet, but GitLab provides almost all the functionality needed for developers. Tracking everything in GitLab would make it much easier to reference the issues in code reviews. 
Now, when you create a ticket in Jira, you need to create a branch in GitLab with the Jira ticket number, and then, when you push a commit, you also need to remember the ticket number. But once everything is in GitLab, we’ll be able to just push a commit to a merge request. GitLab already gives us so much transparency into what we are doing. That would be even greater if everyone was using GitLab issues and epics.\n\n**What has the response from your team been like?**\n\nThere have been no complaints about stability or performance, and the performance is improving release by release! GitLab became very fast with [version 15](/releases/2022/05/22/gitlab-15-0-released/) — I can feel and see the performance boost. People are happy. People have been quiet, and when engineers are not complaining, that means that the tool is quite good. \n\n**For companies that are just getting started with GitLab, what advice would you give them on where to start?**\n\nI’d recommend starting with smaller projects, setting up all the steps needed for your pipeline, and trying to use features of GitLab such as issues and epics. In our case, we started with a larger project from our Product Information Management service team — the project’s repository had three services and we needed to run different pipelines for different changes. And even in our case, GitLab was quite flexible. We could say, “Okay, if a commit message has this specific word, then run these steps. If it has this word, run these other steps.”\n\nWhat we learned from that experience was that first it’s valuable to understand what you need to run as a pipeline. What comes to mind first is tests and probably deployment into an environment. Then we need to monitor the performance and see if we need to pass our caches in between the pipelines to speed up the deployment, or in the case of Node.js, do not download [npm packages](https://docs.gitlab.com/ee/user/packages/npm_registry/) in every change or merge request or branch. 
Just cache it once in the first run. Then you can optimize step by step. So that’s what I mean by starting small.\n\n**What are you most looking forward to doing with GitLab in the future?**\n\nI love this question. First, I would like to point out that GitLab surprises me with each release. Personally, I am looking forward to using more automation tools for QA engineers, as well as auto pipelines and integrations with the latest automation frameworks.\n\nWe recently moved away from Sentry for error tracking, so I’m also interested in exploring doing [error tracking in GitLab](https://docs.gitlab.com/ee/operations/error_tracking.html). And, I’m interested in seeing how we might be able to use [feature flags in GitLab](https://docs.gitlab.com/ee/operations/feature_flags.html). We’re currently using LaunchDarkly for A/B testing, but if GitLab can even match half of that functionality, it would be great to bring everything together into one platform.\n\nFinally, we’re also looking into how we can make our GitLab implementation even better and more stable, so we want to deploy it into [a Kubernetes cluster](https://docs.gitlab.com/ee/user/clusters/agent/). 
Currently, it’s just deployed into EC2s, so that would be our next big step for GitLab.\n\nPhoto by [Mathew Schwartz](https://unsplash.com/@cadop?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n","customer-stories",[789,1246,743,721,9],{"slug":1945,"featured":6,"template":701},"vestiaire-collective-on-moving-to-a-devsecops-platform","content:en-us:blog:vestiaire-collective-on-moving-to-a-devsecops-platform.yml","Vestiaire Collective On Moving To A Devsecops Platform","en-us/blog/vestiaire-collective-on-moving-to-a-devsecops-platform.yml","en-us/blog/vestiaire-collective-on-moving-to-a-devsecops-platform",{"_path":1951,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1952,"content":1958,"config":1963,"_id":1965,"_type":14,"title":1966,"_source":16,"_file":1967,"_stem":1968,"_extension":19},"/en-us/blog/we-are-building-a-better-heroku",{"title":1953,"description":1954,"ogTitle":1953,"ogDescription":1954,"noIndex":6,"ogImage":1955,"ogUrl":1956,"ogSiteName":686,"ogType":687,"canonicalUrls":1956,"schema":1957},"We are very far from a better Heroku for production apps in a hyper cloud","GitLab is building Heroku for production apps in hyper clouds, integrated into your DevSecOps workflow: The 5 minute production app.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672405/Blog/Hero%20Images/spacex-unsplash.jpg","https://about.gitlab.com/blog/we-are-building-a-better-heroku","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We are very far from a better Heroku for production apps in a hyper cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2021-03-22\",\n      }",{"title":1953,"description":1954,"authors":1959,"heroImage":1955,"date":1960,"body":1961,"category":695,"tags":1962},[1081],"2021-03-22","\n\n{::options parse_block_html=\"true\" 
/}\n\n\n\n> Update: This post does not live up to its original title `We are building a better Heroku`. It shows my own personal experience and reflects poorly on competitors. I am sorry about that.\n>\n> It should have emphasized the _building_ part, we're just starting. The current 5 minute production app doesn't hold a candle to Heroku at the moment.\n> It should have made it clear the goals is to improve the speed with which you can configure a production app, not a development app. Development apps on Heroku are already close to perfect. The examples in this post are contrived since it talks about a development app, as [rightly called out by Heroku people](https://twitter.com/johnbeynon/status/1374306499426652161).\n> It should have gone into [why hyper clouds might be preferable](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#hypercloud).\n> It should have talked about state, we made a small improvement in [this MR](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/78028/diffs) but we should have done the [planned work](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/11137) and made one post out of it.\n>\n> We are very far from a better Heroku for production apps in a hyper cloud.\n\nCreating a web application has become very convenient and easy. You’ll start in your local development environment, run a dev server and verify the changes looking good. At a certain point, you want to share it with your friends on the internet. A service or server?\n\n### Use Heroku\n\nI have been a backend developer in the past 20 years. Web development is often fighting with Javascript and CSS. 
Especially Heroku as a deployment platform is a new area for me.\n\nLet's start with creating an account, login, and follow the web instructions to create a new app in the [documentation](https://devcenter.heroku.com/).\n\nLet’s try a fun demo, a battleship game to learn Javascript on the client and NodeJS on the server.\n\n```\n$ cd ~/dev/opensource\n$ git clone https://github.com/kubowania/battleships\n$ cd battleships\n```\n\nTest it locally, optional.\n\n```\n$ npm install\n$ npm start\n```\n\nInstall the Heroku CLI, on [macOS with Homebrew](/blog/dotfiles-document-and-automate-your-macbook-setup/).\n\n```\n$ brew install heroku/brew/heroku\n\n$ heroku autocomplete\n```\n\nThis opens a new browser window to login. Lets create an app.\n\n```\n$ heroku create\nCreating app... done, ⬢ nameless-mountain-48655\nhttps://nameless-mountain-48655.herokuapp.com/ | https://git.heroku.com/nameless-mountain-48655.git\n```\n\nThe CLI command adds a new Git remote called `heroku` where we need to push into.\n\n```\n$ git push heroku main\n\nremote: -----> Launching...\nremote:        Released v3\nremote:        https://nameless-mountain-48655.herokuapp.com/ deployed to Heroku\nremote:\nremote: Verifying deploy... done.\n```\n\nDeployed in less than 5 minutes. Getting there and installing the pre-requisites on the CLI took longer than expected.\n\n![Battleship web app deployed with Heroku](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/battleship_heroku.png){: .shadow.medium.center}\n\nLots of CLI commands involved, and it did not run in a CI/CD pipeline with additional tests before deploying it. Now the web application is deployed into a black box. Want to use Let’s Encrypt and your own domain name? How about adding the deployment natively to GitLab to have a single application in your DevOps workflow?\n\n#### Setting up Persistence with Heroku\n\nThis gets more challenging. 
Imagine that your app uses a relational database, a caching layer and object storage. This requires lots of CLI commands and a deep dive into the application configuration. We did not touch persistent backends in the demo app above yet.\n\nHeroku offers [PostgreSQL](https://devcenter.heroku.com/categories/postgres-basics), [Redis](https://devcenter.heroku.com/categories/heroku-redis) and [AWS S3](https://devcenter.heroku.com/articles/s3).\n\n```\nheroku addons:create heroku-postgresql:hobby-dev\nheroku addons:create heroku-postgresql:hobby-dev --version=10\n\nheroku pg:promote HEROKU_POSTGRESQL_YELLOW\n```\n\n```\nheroku addons:create heroku-redis:hobby-dev -a 5-min-prod-app\n```\n\nNote that the default `hobby-dev` plan allows unencrypted connections too.\n\n```\nheroku config:set S3_BUCKET_NAME=appname-assets\nheroku config:set AWS_ACCESS_KEY_ID=xxx AWS_SECRET_ACCESS_KEY=yyy\n```\n\nAll stateful backends in Heroku need to be secured. This requires more commands to create self-signed certificates and encrypt transport layers in the backend.\n\nAfter all, is there a better way to automate requesting stateful backend services and automate their provisioning?\n\n### A better Heroku: The 5 minute production app\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">the modern tech industry is basically folks just endlessly remaking remakes of heroku\u003C/p>&mdash; Always Miso (@monkchips) \u003Ca href=\"https://twitter.com/monkchips/status/1368924845740810249?ref_src=twsrc%5Etfw\">March 8, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">Truth \u003Ca href=\"https://t.co/AFN9anBbQG\">https://t.co/AFN9anBbQG\u003C/a>\u003C/p>&mdash; Sid Sijbrandij (@sytses) \u003Ca 
href=\"https://twitter.com/sytses/status/1368982067229253632?ref_src=twsrc%5Etfw\">March 8, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n\nCloud resources are cheap. AWS offers a free tier, HashiCorp Terraform has become an excellent tool to manage multi-cloud resources and GitLab integrates app packaging, container registry, deployment and TLS certificates.\n\nThere’s more application goodies: Provision a PostgreSQL VM, add Redis, SMTP email transport, custom domains with Let’s Encrypt.\n\n#### Use the 5 minute production app\n\nThe [documentation](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#usage) says to create a new AWS IAM role with credentials for automation.\n\nThe second step is to have the source code available in a GitLab project. You can use `New project > Import project > Repo by URL` to automatically import the GitHub repository `https://github.com/kubowania/battleships.git`.\n\n![Import the GitHub repository into GitLab](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/gitlab_new_project_import_github_url.png){: .shadow.medium.center}\n\nOnce imported, navigate into `Settings > CI/CD > Variables` to specify the AWS credentials and region. Ensure to tick the `Masked` checkbox to hide them in all job logs.\n\n![Configure AWS credentials as masked CI/CD variables](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/gitlab_5minprodapp_aws_cicd_variables.png){: .shadow.medium.center}\n\nNavigate back into the project overview. Click the `Setup CI/CD` button or open the Web IDE to create a new `.gitlab-ci.yml` file. 
Add the remote CI/CD template include like this:\n\n```\nvariables:\n    TF_VAR_DISABLE_POSTGRES: \"true\"\n    TF_VAR_DISABLE_REDIS: \"true\"\n\ninclude:\n  remote: https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/raw/stable/deploy.yml\n```\n\nThe battleship application does not need the PostgreSQL and Redis backends. They are disabled with setting `TF_VAR_DISABLE_POSTGRES` and `TF_VAR_DISABLE_REDIS` [variables](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/blob/master/VARIABLES.md) to `false`.\n\nCommit the change to the default branch.\n\n8:43pm CET: Pipeline started with the build job. 2 min 33 sec.\n\n![GitLab pipeline builds the Docker image with Auto-Build](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/gitlab_5minprodapp_pipeline_01.png){: .shadow.medium.center}\n\n8:45pm CET: Pipeline runs terraform_apply to provision AWS resources in 2min 47 sec.\n\n![GitLab pipeline runs Terraform to provision cloud resources in AWS](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/gitlab_5minprodapp_pipeline_02.png){: .shadow.medium.center}\n\n8:48pm CET: Deployed in 1 min 11 sec.\n\nThe deploy job log greets with the URL in ~5 minutes, including a Lets Encrypt TLS certificate. There we go, let’s play some battleship!\n\n![Battleship web app deployed in AWS with the 5 minute production app](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/battleship_5minprodapp_aws.png){: .shadow.medium.center}\n\nNote that we never left the browser and there is no CLI involved. Next to the included template, there’s also room for adding more CI tests and security best practices while hacking on this project. You can navigate into your AWS console for debugging and troubleshooting and plan with production budgets, where needed.\n\n#### Setting up Persistence with the 5 Minute Production App\n\nRemember the stateful backends with Heroku above? 
By default, the 5 minute production app takes care of provisioning:\n\n- PostgreSQL server and secured backend\n- Redis cluster\n- S3 object storage in AWS\n\nThe 5 minute production app uses the managed stateful services of a hypercloud so your data is persisted and secure. By leveraging these managed services (databases, caching, objects storage, etc.) you have less to maintain. Everything is provisioned through Terraform which has the following advantages:\n\n- Terraform is the most popular IaC tool.\n- Terraform works accross platforms.\n- Terraform is well-documented.\n- Terraform state can be [stored and viewed in GitLab](https://docs.gitlab.com/ee/user/infrastructure/#gitlab-managed-terraform-state).\n- You avoid the cost and complexity of Kubernetes.\n- You have complete control to customize and extend.\n\nWe will explore more stateful backends in future apps and blog posts.\n\n### 5 minute production app + DevSecOps = ❤️\n\nExample for [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) and [SAST](https://docs.gitlab.com/ee/user/application_security/sast/analyzers.html):\n\n```\ninclude:\n  - remote: https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/raw/stable/deploy.yml\n  - template: Dependency-Scanning.gitlab-ci.yml\n  - template: Security/SAST.gitlab-ci.yml\n```\n\n### More to use: Database backends, TLS, environments\n\nThis blog post covers the basic learning steps with Heroku and the 5 minute production app. A typical web app requires a database, storage or caching backend, which can get complicated to run with Heroku. We will explore the setup and production experience in future blog posts. 
In addition to backends, we will also look into TLS certificates and production environments in CD workflows.\n\nMeanwhile, try the 5 min production app yourself:\n\n* [5 minute production app docs](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#the-5-minute-production-app)\n* [Example projects](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#examples)\n* Your own future web app with [your custom domain](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#custom-domain)?\n\nCover image by [SpaceX](https://unsplash.com/@spacex) on [Unsplash](https://unsplash.com/photos/OHOU-5UVIYQ)\n\n",[1001,765,9],{"slug":1964,"featured":6,"template":701},"we-are-building-a-better-heroku","content:en-us:blog:we-are-building-a-better-heroku.yml","We Are Building A Better Heroku","en-us/blog/we-are-building-a-better-heroku.yml","en-us/blog/we-are-building-a-better-heroku",{"_path":1970,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1971,"content":1977,"config":1984,"_id":1986,"_type":14,"title":1987,"_source":16,"_file":1988,"_stem":1989,"_extension":19},"/en-us/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks",{"title":1972,"description":1973,"ogTitle":1972,"ogDescription":1973,"noIndex":6,"ogImage":1974,"ogUrl":1975,"ogSiteName":686,"ogType":687,"canonicalUrls":1975,"schema":1976},"Setting up 100 AWS Graviton Spot Runners for GitLab","Utilizing the GitLab HA Scaling Runner Vending Machine for AWS Automation to setup 100 GitLab runners on AWS Spot.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669882/Blog/Hero%20Images/hundredgitlabspotrunner.png","https://about.gitlab.com/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to provision 100 AWS Graviton GitLab Spot Runners in 10 Minutes for $2/hour\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"},{\"@type\":\"Person\",\"name\":\"Nupur Sharma\"}],\n        \"datePublished\": \"2021-08-17\",\n      }",{"title":1978,"description":1973,"authors":1979,"heroImage":1974,"date":1981,"body":1982,"category":741,"tags":1983},"How to provision 100 AWS Graviton GitLab Spot Runners in 10 Minutes for $2/hour",[716,1980],"Nupur Sharma","2021-08-17","Managing elastically scaled or highly available compute infrastructures is\none of the key challenges the cloud was built for. Application scaling\nconcerns can be handled by cloud services that are purpose designed,\nrigorously tested, and continually improved. This article dives into some\nspecific enablement automation that brings the benefits of AWS Autoscaling\nGroups (ASG) to runner management. There are benefits to both the largest\nfleets and single instance runners.\n\n\nEmbedded in this article is a YouTube video that demonstrates the deployment\nof 100 GitLab runners on Amazon EC2 Spot compute in less than 10 minutes\nusing less than 10 clicks. The video also shows updating this entire fleet\nin under 10 minutes to emphasize the time savings of built-in maintenace.\n\n\nThe information and automation in this article applies to GitLab Private\nRunners which are deployed on your own compute resources. Self-managed\nGitLab instances require private runners, but they can also be configured\nand used with GitLab.com SaaS accounts.\n\n\n## Well-architected runner management\n\n\nThere are many different reasons that a customer might need to deploy\nmultiple runners with various characteristics. 
Some of the more popular ones\nare:\n\n\n- Workloads that require large-scale runner fleets.\n\n- To gain cost savings through Spot compute, uptime scheduling, and ARM\narchitecture.\n\n- Projects with high demand of CI activity to make sure that the runner is\nnot being held up by jobs on another project.\n\n- Jobs that have special security requirements, e.g., security credentials,\nrole-based access or managed identities for Continuous Delivery (CD). These\nsecurity requirements can enable instance-level (AWS IAM Instance Profile)\nsecurity by allowing runners with sufficient rights to deploy in specific\ntarget environments. For example, a CD runner for non-production\nenvironments and a different runner for production.\n\n- Implementing role-based access control rather than user-based. This means\nusers don't have to use secrets to manage security requirements for CI jobs\nto accomplish their tasks.\n\n- Development teams can be confident the runner has the same capabilities\nfor CI and CD automation they test through their interactive logins by\nleveraging a common IAM role.\n\n\n### The challenges of building production-grade elastic GitLab Runners\n\n\n[The GitLab Runner](https://docs.gitlab.com/runner/) is the workhorse of\nGitLab CI and CD capabilities. The runner can handle numerous operating\nenvironments and automation functions for a GitLab instance. The GitLab\nRunner has become very sophisticated due to the broad range of supported\nenvironments. In order to successfully configure the GitLab Runner as a\nset-it-and-forget-it service, the user has to work through many different\ndecisions and considerations. We summarize some of the GitLab\nRunner-specific considerations that can be challenging:\n\n\n- There are a lot of configuration options and scenarios to sort through. 
It\ncan be an iterative process to discover what needs to be done to set up\nGitLab Runners.\n\n- Ensuring runners are a production-grade capability requires Infrastructure\nas Code (IaC) development so that high availability and scaling can be\nachieved by automatically spawning new instances.\n\n- Ensuring that runner deregistration happens correctly when GitLab Runners\nare automatically scaled in.\n\n- Additional cost-saving configurations, such as Spot compute and scheduled\nrunner uptime, can complicate the automation requirements for AWS\nAutoscaling Groups (ASGs).\n\n- Large organizations often want developers to be able to easily\nself-service deploy runners with various configurations. Service Management\nAutomation (SMA) has been made popular with products like Service Now, AWS\nService Catalog, and AWS Control Tower. This automation is compatible with\nSMA.\n\n- It can be difficult to map runners to AWS and map AWS to runners in large\norganizations with numerous runners and AWS accounts.\n\n\n### Introducing the GitLab HA Scaling Runner Vending Machine for AWS\n\n\nAn effective way to handle multiple design considerations is to make a\nreusable tool. To help you with best practice runner deployments on AWS, we\ncreated the [GitLab HA Scaling Runner Vending Machine for\nAWS](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/)\n(\"The GitLab Runner Vending Machine\"). It is created in AWS’ Infrastructure\nas Code, known as CloudFormation.\n\n\n> **Designed with AWS Well Architected:** This automation has many features\nbeyond the scope of this blog post. The primary focus of this blog post is\non managing costs. 
See the [full list of features\nhere](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/-/blob/main/FEATURES.md).\n\n\nThe GitLab Runner Vending Machine has the following cost management and\nscaling management benefits, exposed as a variety of parameters:\n\n\n- The ability to leverage Spot compute instances. This is important because\nit leaves CI/CD pipeline developers in charge of whether specific GitLab\nCI/CD jobs run on Spot compute or not.\n\n- ASG-scheduled scaling so that a runner or runner fleet can be completely\nshut down when not in use.\n\n- The GitLab Runner Vending Machine can leverage ARM compute for Linux -\nwhich runs faster and costs less.\n\n- It can also use ASG to update all runners in a fleet with the latest\nmachine images and GitLab Runner version (or a specific version). When\nmaintenance is not built-in, the labor cost of keeping things up-to-date can\nbe significant.\n\n- Runner naming and tagging in AWS and GitLab, which eases the burden of\nlocating runner instances and managing orphaned runner registrations,\nwhether it is manual or automated.\n\n\n### How to save money with The GitLab Runner Vending Machine\n\n\nSignificant savings are possible with this IaC, whether your team wants to\nsave on a single runner or a fleet of them.\n\n\nThe savings calculations below are for a single runner and should be linear\nfor a given workload. To calculate your savings for more runners, simply\nmultiply the final result by the number of runner instances. The available\n\"Runner Minutes\" per hour is calculated as the runner's job concurrency\nsetting multiplied by the minutes in an hour. For this exercise, we'll use\njob concurrency of \"10\". This number should be changed depending on the\ninstance types you are using and the load testing of your typical CI/CD\nworkloads.\n\n\nJust like most performance analysis, we are assuming that hardware resource\nutilization is optimal and consistent. 
If a runner cluster can sustain\nrespectable performance with 80% CPU loading, this calculation assumes that\nwould be maintained regardless of the size of the cluster.\n\n\n#### AWS Graviton ARM and Spot savings\n\n\nThe GitLab Runner engineering team has completed performance testing that\ndemonstrates performance gains of more than 30% on some AWS Graviton\n(ARM-based) instance types. Assuming that runners are performance-managed\nfor optimized utilization, this gain is a direct cost savings. Just\nrecently, we shared [how deploying GitLab on Arm-based AWS Graviton2\nresulted in cost savings of 23% and 36% performance\ngains](/blog/achieving-23-cost-savings-and-36-performance-gain-using-gitlab-and-gitlab-runner-on-arm-neoverse-based-aws-graviton2-processor/).\n\n\n![ARM Efficiency Test Results For GitLab\nRunner](https://about.gitlab.com/images/blogimages/hundred-runners/hundredrunners-image1.png)\n\nGitLab Runner testing results for ARM-efficiency gains.\n\n{: .note.text-center}\n\n\n#### Scheduling savings\n\n\nThe savings can be dramatic when teams are able to turn off runners when not\nin use. For instance: Scheduling a runner to operate for 40 hours per week\nsaves 76% when compared to the cost of running it for 168 hours. Runners\nthat are just in use for 10 hours per week save 94%.\n\n\n#### Combining scheduling, Spot, and ARM to save 97%\n\n\nJust for fun, let's see what savings are possible by comparing a standard\nrunner scenario with deploying runners in customized, stand-alone instances\nto the maximum savings automation can deliver.\n\n\nImagine I am a developer who set up a custom GitLab Runner on an m5.xlarge\ninstance, which is the x86 architecture, for a development team that works\nfor 40 hours in the same time zone. Since there is no automation, the GitLab\nRunner runs 24/7. We will assume a job concurrency of 10, which gives 600\n\"runner minutes\" per hour of run time. 
Scheduling uptime, running on Spot,\nand leveraging ARM can all be achieved quickly by redeploying the runner\nwith The GitLab Runner Vending Machine.\n\n\nHere is the calculation to run the configuration described above, for one\nweek: On Demand, x86, Always On: 1 x m5.xlarge = .192/hr x 168 hrs/week =\n**$32/week or $1664/year**\n\n\nHere are the savings that come from running Spot, ARM, and scheduling the\nRunner to be up just 40hrs/week: 1 x m6g.large Spot = .0419 x 40hrs/week x\n64% (36% better performance) = **$1/week**\n\n\n$1/$32 x 100 = 3.125% of the original cost for the same work. In other\nwords, **we just saved 97%** without ever impacting the ability to get the\njob done.\n\n\nIn short, The GitLab Runner Vending Machine intends to bring the many cost\nsaving mechanisms of AWS Cloud computing to your GitLab Runner fleets.\n\n\nYou can save costs by using ARM/Graviton instances, Spot compute, or by\nscheduling uptime. In many cases, you can combine all three savings\nmechanisms for maximum impact.\n\n\n### Special pipeline building concerns for Spot Runners\n\n\nSpot instances can disappear with as little as two minutes of warning. This\ninevitably means some runners will be terminated while jobs are still in\nprogress. CI/CD pipeline developers must take into account whether a job\nought to run on compute resources that can disappear with short notice (so\nshort as to be considered \"no notice\"). 
This comes down to deciding what\njobs are OK to run on Spot and what jobs should instead run on AWS'\npersistent compute known as \"On-Demand\".\n\n\nThe GitLab Runner Vending Machine accounts for these constraints by tagging\nrunner instances in GitLab with `computetype-spot` or `computetype-ondemand`\n– indicating in the \"tags\" segment of GitLab CI/CD jobs if a job should run\non Spot compute.\n\n\nSome types of CI workloads, e.g., mass performance testing or large unit\ntesting suites, may already have work queues and work tracking that make it\nideal for Spot compute. Other activities, e.g., polling another system for a\ndeployment status, could suffer a material discrepancy if terminated\nprematurely. Others, such as building the application, are sort of in the\nmiddle. Usually, restarting the build is sufficient.\n\n\n### Job configuration for Spot\n\n\nIf you need to reschedule terminated work, it is helpful to configure\nGitLab’s job `retry:` keyword. When working with a dispatching engine or\nwork queue that automatically accounts for incomplete work by processing\nagents, the retry configuration is unnecessary.\n\n\nHere is an example that implements both of these concepts:\n\n\n```\n\nmy-scaled-test-suite:\n  parallel: 100\n  tags:\n  - computetype-spot\n  retry:\n    max: 2\n    when:\n      - runner_system_failure\n      - unknown_failure\n```\n\n\nThe usage and limitations of `retry:` are discussed in greater detail in the\n[GitLab CI documentation on\nretry](https://docs.gitlab.com/ee/ci/yaml/#retry).\n\n\n### How to get started\n\n\nThe CloudFormation templates for the [GitLab Runner Vending Machine are\nmanaged in a public project on\nGitLab.com](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/).\nThere is a lot of information in the project about how the solution works\nand what problems it aims to solve, and will be useful for very experienced\nAWS builders.\n\n\nBut to keep it simple for users who want the 
quickest path to creating\nrunners of all sizes, it also has an \"easy button\" page that has a table\nthat looks like this:\n\n\n![Easy Button Page\nSample](https://about.gitlab.com/images/blogimages/hundred-runners/hundredrunners-image2.png)\n\nThe easy buttons launch a CloudFormation Quick Create that only requires\nfilling in a few fields.\n\n{: .note.text-center}\n\n\nKeep in mind that easy buttons intentionally hide the high degree of\ncustomization that is possible with this automation by setting the\nparameters for the most common scenarios in advance. Advanced AWS users\nshould read more of the documentation in the repository to understand that\nthe GitLab Runner Vending Machine is also capable of creating sophisticated\nrunner fleets.\n\n\nFirst, click the CloudFormation icons to launch the Easy Button template\ndirectly into the CloudFormation Quick Create console. The Quick Create\nconsole is designed for simplicity to enable you to complete the prompts and\nthen click one button to launch the stack.\n\n\n![CloudFormation Quick Create\nExample](https://about.gitlab.com/images/blogimages/hundred-runners/hundredrunners-image3.png){:\n.shadow.medium.center}\n\nThis is a typical Quick Create form for the GitLab Vending Machine easy\nbuttons.\n\n{: .note.text-center}\n\n\nNext, select the deploy region by using the drop down menu in the upper\nright of the console (where the screenshot says \"Oregon\").\n\n\nIn most cases, you will only need to add your GitLab instance URL\n(GitLab.com is fine if that is where your repositories are), and the runner\ntoken, which you retrieve from the group level or project you wish to attach\nthe runners to. If you are registering against a self-managed instance, you\ncan use the instance-level tokens from the administrator console to register\nthe runner for use across the entire instance. 
Read on for [instructions for\nfinding Runner Registration\nTokens](https://docs.gitlab.com/runner/register/#requirements).\n\n\nA few other customization parameters are available for your convenience.\n\n\nNote that the automation attempts to use the default VPC of the region in\nwhich you deploy and the default security group for the VPC. In some\norganizations, default VPCs and/or their security groups are locked. You can\ndeploy to custom VPCs by using the full template instead of an easy button.\nOn the easy button page look for the footnote \"Not an easy button person?\"\nto find a link to the full template.\n\n\nWatch the video below to see the provisioning of 100 GitLab Spot\nRunners on AWS in less than 10 minutes and in less than 10 clicks for just\n$5 per hour.\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/EW4RJv5zW4U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\nCheck out the YouTube playlist for more relevant videos about [GitLab and\nAWS](https://youtube.com/playlist?list=PL05JrBw4t0Ko30Bkf8bAvR-8E441Fy2G9)\n\n\n### This automation does much, much more\n\n\nWhile this article focused on how much you can save while using Spot for\nscaled runners, the underlying automation is capable of many other\nscenarios. 
Below is a summary of the additional features and benefits\ncovered in the documentation.\n\n\n- Scaled runners that are persistent (not Spot) ([see more easy buttons\nhere](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/-/blob/main/easybuttons.md)).\n\n- Supports small, single runner setups and scaled ones.\n\n- Supports GitLab.com SaaS or self-managed instances.\n\n- Automates OS patching and Runner version upgrading.\n\n- Supports Windows and Linux.\n\n- Can be reused with Amazon provisioning services such as Service Catalog\nand Control Tower.\n\n- Implements least privilege security throughout.\n\n- Supports deregistering runners on scale-in or Spot termination.\n\n\nA full feature list is in the document [Features of GitLab HA Scaling Runner\nVending Machine for\nAWS](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/-/blob/main/FEATURES.md)\n\n\n### Easy running\n\n\nWe hope that this automation will make deployment of runners of all sizes\nsimple for you. We are open to your feedback, suggestions and contributions\nin the GitLab project.\n",[721,9,765,811],{"slug":1985,"featured":6,"template":701},"100-runners-in-less-than-10mins-and-less-than-10-clicks","content:en-us:blog:100-runners-in-less-than-10mins-and-less-than-10-clicks.yml","100 Runners In Less Than 10mins And Less Than 10 Clicks","en-us/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks.yml","en-us/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks",8,[679,706,728,751,773,796,818,838,863],1758326228321]