[{"data":1,"prerenderedAt":1345},["ShallowReactive",2],{"/en-us/blog/tags/aws/":3,"navigation-en-us":20,"banner-en-us":450,"footer-en-us":467,"AWS-tag-page-en-us":677},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":11,"_id":13,"_type":14,"title":15,"_source":16,"_file":17,"_stem":18,"_extension":19},"/en-us/blog/tags/aws","tags",false,"",{"tag":9,"tagSlug":10},"AWS","aws",{"template":12},"BlogTag","content:en-us:blog:tags:aws.yml","yaml","Aws","content","en-us/blog/tags/aws.yml","en-us/blog/tags/aws","yml",{"_path":21,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":446,"_type":14,"title":447,"_source":16,"_file":448,"_stem":449,"_extension":19},"/shared/en-us/main-navigation","en-us",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":377,"minimal":408,"duo":427,"pricingDeployment":436},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,187,192,298,358],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":60,"config":61},"Meet GitLab Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo 
ai",{"title":65,"description":66,"link":67},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":169},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,124,148],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[108,112,116,120],{"text":109,"config":110},"CI/CD",{"href":111,"dataGaLocation":28,"dataGaName":109},"/solutions/continuous-integration/",{"text":113,"config":114},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":115},"AI assisted development",{"text":117,"config":118},"Source Code Management",{"href":119,"dataGaLocation":28,"dataGaName":117},"/solutions/source-code-management/",{"text":121,"config":122},"Automated Software Delivery",{"href":105,"dataGaLocation":28,"dataGaName":123},"Automated software delivery",{"title":125,"description":126,"link":127,"items":132},"Security","Deliver code faster without compromising security",{"config":128},{"href":129,"dataGaName":130,"dataGaLocation":28,"icon":131},"/solutions/security-compliance/","security and 
compliance","ShieldCheckLight",[133,138,143],{"text":134,"config":135},"Application Security Testing",{"href":136,"dataGaName":137,"dataGaLocation":28},"/solutions/application-security-testing/","Application security testing",{"text":139,"config":140},"Software Supply Chain Security",{"href":141,"dataGaLocation":28,"dataGaName":142},"/solutions/supply-chain/","Software supply chain security",{"text":144,"config":145},"Software Compliance",{"href":146,"dataGaName":147,"dataGaLocation":28},"/solutions/software-compliance/","software compliance",{"title":149,"link":150,"items":155},"Measurement",{"config":151},{"icon":152,"href":153,"dataGaName":154,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[156,160,164],{"text":157,"config":158},"Visibility & Measurement",{"href":153,"dataGaLocation":28,"dataGaName":159},"Visibility and Measurement",{"text":161,"config":162},"Value Stream Management",{"href":163,"dataGaLocation":28,"dataGaName":161},"/solutions/value-stream-management/",{"text":165,"config":166},"Analytics & Insights",{"href":167,"dataGaLocation":28,"dataGaName":168},"/solutions/analytics-and-insights/","Analytics and insights",{"title":170,"items":171},"GitLab for",[172,177,182],{"text":173,"config":174},"Enterprise",{"href":175,"dataGaLocation":28,"dataGaName":176},"/enterprise/","enterprise",{"text":178,"config":179},"Small Business",{"href":180,"dataGaLocation":28,"dataGaName":181},"/small-business/","small business",{"text":183,"config":184},"Public Sector",{"href":185,"dataGaLocation":28,"dataGaName":186},"/solutions/public-sector/","public sector",{"text":188,"config":189},"Pricing",{"href":190,"dataGaName":191,"dataGaLocation":28,"dataNavLevelOne":191},"/pricing/","pricing",{"text":193,"config":194,"link":196,"lists":200,"feature":285},"Resources",{"dataNavLevelOne":195},"resources",{"text":197,"config":198},"View all 
resources",{"href":199,"dataGaName":195,"dataGaLocation":28},"/resources/",[201,234,257],{"title":202,"items":203},"Getting started",[204,209,214,219,224,229],{"text":205,"config":206},"Install",{"href":207,"dataGaName":208,"dataGaLocation":28},"/install/","install",{"text":210,"config":211},"Quick start guides",{"href":212,"dataGaName":213,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":215,"config":216},"Learn",{"href":217,"dataGaLocation":28,"dataGaName":218},"https://university.gitlab.com/","learn",{"text":220,"config":221},"Product documentation",{"href":222,"dataGaName":223,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":225,"config":226},"Best practice videos",{"href":227,"dataGaName":228,"dataGaLocation":28},"/getting-started-videos/","best practice videos",{"text":230,"config":231},"Integrations",{"href":232,"dataGaName":233,"dataGaLocation":28},"/integrations/","integrations",{"title":235,"items":236},"Discover",[237,242,247,252],{"text":238,"config":239},"Customer success stories",{"href":240,"dataGaName":241,"dataGaLocation":28},"/customers/","customer success stories",{"text":243,"config":244},"Blog",{"href":245,"dataGaName":246,"dataGaLocation":28},"/blog/","blog",{"text":248,"config":249},"Remote",{"href":250,"dataGaName":251,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":253,"config":254},"TeamOps",{"href":255,"dataGaName":256,"dataGaLocation":28},"/teamops/","teamops",{"title":258,"items":259},"Connect",[260,265,270,275,280],{"text":261,"config":262},"GitLab 
Services",{"href":263,"dataGaName":264,"dataGaLocation":28},"/services/","services",{"text":266,"config":267},"Community",{"href":268,"dataGaName":269,"dataGaLocation":28},"/community/","community",{"text":271,"config":272},"Forum",{"href":273,"dataGaName":274,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":276,"config":277},"Events",{"href":278,"dataGaName":279,"dataGaLocation":28},"/events/","events",{"text":281,"config":282},"Partners",{"href":283,"dataGaName":284,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":286,"textColor":287,"text":288,"image":289,"link":293},"#2f2a6b","#fff","Insights for the future of software development",{"altText":290,"config":291},"the source promo card",{"src":292},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758208064/dzl0dbift9xdizyelkk4.svg",{"text":294,"config":295},"Read the latest",{"href":296,"dataGaName":297,"dataGaLocation":28},"/the-source/","the source",{"text":299,"config":300,"lists":302},"Company",{"dataNavLevelOne":301},"company",[303],{"items":304},[305,310,316,318,323,328,333,338,343,348,353],{"text":306,"config":307},"About",{"href":308,"dataGaName":309,"dataGaLocation":28},"/company/","about",{"text":311,"config":312,"footerGa":315},"Jobs",{"href":313,"dataGaName":314,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":314},{"text":276,"config":317},{"href":278,"dataGaName":279,"dataGaLocation":28},{"text":319,"config":320},"Leadership",{"href":321,"dataGaName":322,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":324,"config":325},"Team",{"href":326,"dataGaName":327,"dataGaLocation":28},"/company/team/","team",{"text":329,"config":330},"Handbook",{"href":331,"dataGaName":332,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":334,"config":335},"Investor relations",{"href":336,"dataGaName":337,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":339,"config":340},"Trust 
Center",{"href":341,"dataGaName":342,"dataGaLocation":28},"/security/","trust center",{"text":344,"config":345},"AI Transparency Center",{"href":346,"dataGaName":347,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":349,"config":350},"Newsletter",{"href":351,"dataGaName":352,"dataGaLocation":28},"/company/contact/","newsletter",{"text":354,"config":355},"Press",{"href":356,"dataGaName":357,"dataGaLocation":28},"/press/","press",{"text":359,"config":360,"lists":361},"Contact us",{"dataNavLevelOne":301},[362],{"items":363},[364,367,372],{"text":35,"config":365},{"href":37,"dataGaName":366,"dataGaLocation":28},"talk to sales",{"text":368,"config":369},"Get help",{"href":370,"dataGaName":371,"dataGaLocation":28},"/support/","get help",{"text":373,"config":374},"Customer portal",{"href":375,"dataGaName":376,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":378,"login":379,"suggestions":386},"Close",{"text":380,"link":381},"To search repositories and projects, login to",{"text":382,"config":383},"gitlab.com",{"href":42,"dataGaName":384,"dataGaLocation":385},"search login","search",{"text":387,"default":388},"Suggestions",[389,391,395,397,401,405],{"text":57,"config":390},{"href":62,"dataGaName":57,"dataGaLocation":385},{"text":392,"config":393},"Code Suggestions (AI)",{"href":394,"dataGaName":392,"dataGaLocation":385},"/solutions/code-suggestions/",{"text":109,"config":396},{"href":111,"dataGaName":109,"dataGaLocation":385},{"text":398,"config":399},"GitLab on AWS",{"href":400,"dataGaName":398,"dataGaLocation":385},"/partners/technology-partners/aws/",{"text":402,"config":403},"GitLab on Google Cloud",{"href":404,"dataGaName":402,"dataGaLocation":385},"/partners/technology-partners/google-cloud-platform/",{"text":406,"config":407},"Why 
GitLab?",{"href":70,"dataGaName":406,"dataGaLocation":385},{"freeTrial":409,"mobileIcon":414,"desktopIcon":419,"secondaryButton":422},{"text":410,"config":411},"Start free trial",{"href":412,"dataGaName":33,"dataGaLocation":413},"https://gitlab.com/-/trials/new/","nav",{"altText":415,"config":416},"Gitlab Icon",{"src":417,"dataGaName":418,"dataGaLocation":413},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203874/jypbw1jx72aexsoohd7x.svg","gitlab icon",{"altText":415,"config":420},{"src":421,"dataGaName":418,"dataGaLocation":413},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203875/gs4c8p8opsgvflgkswz9.svg",{"text":423,"config":424},"Get Started",{"href":425,"dataGaName":426,"dataGaLocation":413},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":428,"mobileIcon":432,"desktopIcon":434},{"text":429,"config":430},"Learn more about GitLab Duo",{"href":62,"dataGaName":431,"dataGaLocation":413},"gitlab duo",{"altText":415,"config":433},{"src":417,"dataGaName":418,"dataGaLocation":413},{"altText":415,"config":435},{"src":421,"dataGaName":418,"dataGaLocation":413},{"freeTrial":437,"mobileIcon":442,"desktopIcon":444},{"text":438,"config":439},"Back to pricing",{"href":190,"dataGaName":440,"dataGaLocation":413,"icon":441},"back to pricing","GoBack",{"altText":415,"config":443},{"src":417,"dataGaName":418,"dataGaLocation":413},{"altText":415,"config":445},{"src":421,"dataGaName":418,"dataGaLocation":413},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":451,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"title":452,"button":453,"image":458,"config":462,"_id":464,"_type":14,"_source":16,"_file":465,"_stem":466,"_extension":19},"/shared/en-us/banner","is now in public beta!",{"text":454,"config":455},"Try the 
Beta",{"href":456,"dataGaName":457,"dataGaLocation":28},"/gitlab-duo/agent-platform/","duo banner",{"altText":459,"config":460},"GitLab Duo Agent Platform",{"src":461},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":463},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":468,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":469,"_id":673,"_type":14,"title":674,"_source":16,"_file":675,"_stem":676,"_extension":19},"/shared/en-us/main-footer",{"text":470,"source":471,"edit":477,"contribute":482,"config":487,"items":492,"minimal":665},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":472,"config":473},"View page source",{"href":474,"dataGaName":475,"dataGaLocation":476},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":478,"config":479},"Edit this page",{"href":480,"dataGaName":481,"dataGaLocation":476},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":483,"config":484},"Please contribute",{"href":485,"dataGaName":486,"dataGaLocation":476},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":488,"facebook":489,"youtube":490,"linkedin":491},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[493,516,572,601,635],{"title":46,"links":494,"subMenu":499},[495],{"text":496,"config":497},"DevSecOps platform",{"href":55,"dataGaName":498,"dataGaLocation":476},"devsecops platform",[500],{"title":188,"links":501},[502,506,511],{"text":503,"config":504},"View plans",{"href":190,"dataGaName":505,"dataGaLocation":476},"view plans",{"text":507,"config":508},"Why 
Premium?",{"href":509,"dataGaName":510,"dataGaLocation":476},"/pricing/premium/","why premium",{"text":512,"config":513},"Why Ultimate?",{"href":514,"dataGaName":515,"dataGaLocation":476},"/pricing/ultimate/","why ultimate",{"title":517,"links":518},"Solutions",[519,524,526,528,533,538,542,545,549,554,556,559,562,567],{"text":520,"config":521},"Digital transformation",{"href":522,"dataGaName":523,"dataGaLocation":476},"/topics/digital-transformation/","digital transformation",{"text":134,"config":525},{"href":136,"dataGaName":134,"dataGaLocation":476},{"text":123,"config":527},{"href":105,"dataGaName":106,"dataGaLocation":476},{"text":529,"config":530},"Agile development",{"href":531,"dataGaName":532,"dataGaLocation":476},"/solutions/agile-delivery/","agile delivery",{"text":534,"config":535},"Cloud transformation",{"href":536,"dataGaName":537,"dataGaLocation":476},"/topics/cloud-native/","cloud transformation",{"text":539,"config":540},"SCM",{"href":119,"dataGaName":541,"dataGaLocation":476},"source code management",{"text":109,"config":543},{"href":111,"dataGaName":544,"dataGaLocation":476},"continuous integration & delivery",{"text":546,"config":547},"Value stream management",{"href":163,"dataGaName":548,"dataGaLocation":476},"value stream management",{"text":550,"config":551},"GitOps",{"href":552,"dataGaName":553,"dataGaLocation":476},"/solutions/gitops/","gitops",{"text":173,"config":555},{"href":175,"dataGaName":176,"dataGaLocation":476},{"text":557,"config":558},"Small business",{"href":180,"dataGaName":181,"dataGaLocation":476},{"text":560,"config":561},"Public sector",{"href":185,"dataGaName":186,"dataGaLocation":476},{"text":563,"config":564},"Education",{"href":565,"dataGaName":566,"dataGaLocation":476},"/solutions/education/","education",{"text":568,"config":569},"Financial services",{"href":570,"dataGaName":571,"dataGaLocation":476},"/solutions/finance/","financial 
services",{"title":193,"links":573},[574,576,578,580,583,585,587,589,591,593,595,597,599],{"text":205,"config":575},{"href":207,"dataGaName":208,"dataGaLocation":476},{"text":210,"config":577},{"href":212,"dataGaName":213,"dataGaLocation":476},{"text":215,"config":579},{"href":217,"dataGaName":218,"dataGaLocation":476},{"text":220,"config":581},{"href":222,"dataGaName":582,"dataGaLocation":476},"docs",{"text":243,"config":584},{"href":245,"dataGaName":246,"dataGaLocation":476},{"text":238,"config":586},{"href":240,"dataGaName":241,"dataGaLocation":476},{"text":248,"config":588},{"href":250,"dataGaName":251,"dataGaLocation":476},{"text":261,"config":590},{"href":263,"dataGaName":264,"dataGaLocation":476},{"text":253,"config":592},{"href":255,"dataGaName":256,"dataGaLocation":476},{"text":266,"config":594},{"href":268,"dataGaName":269,"dataGaLocation":476},{"text":271,"config":596},{"href":273,"dataGaName":274,"dataGaLocation":476},{"text":276,"config":598},{"href":278,"dataGaName":279,"dataGaLocation":476},{"text":281,"config":600},{"href":283,"dataGaName":284,"dataGaLocation":476},{"title":299,"links":602},[603,605,607,609,611,613,615,619,624,626,628,630],{"text":306,"config":604},{"href":308,"dataGaName":301,"dataGaLocation":476},{"text":311,"config":606},{"href":313,"dataGaName":314,"dataGaLocation":476},{"text":319,"config":608},{"href":321,"dataGaName":322,"dataGaLocation":476},{"text":324,"config":610},{"href":326,"dataGaName":327,"dataGaLocation":476},{"text":329,"config":612},{"href":331,"dataGaName":332,"dataGaLocation":476},{"text":334,"config":614},{"href":336,"dataGaName":337,"dataGaLocation":476},{"text":616,"config":617},"Sustainability",{"href":618,"dataGaName":616,"dataGaLocation":476},"/sustainability/",{"text":620,"config":621},"Diversity, inclusion and belonging (DIB)",{"href":622,"dataGaName":623,"dataGaLocation":476},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":339,"config":625},{"href":341,"dataGaName":342,"dataGaLocation":476},{"text":349,"config":627},{"href":351,"dataGaName":352,"dataGaLocation":476},{"text":354,"config":629},{"href":356,"dataGaName":357,"dataGaLocation":476},{"text":631,"config":632},"Modern Slavery Transparency Statement",{"href":633,"dataGaName":634,"dataGaLocation":476},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":636,"links":637},"Contact Us",[638,641,643,645,650,655,660],{"text":639,"config":640},"Contact an expert",{"href":37,"dataGaName":38,"dataGaLocation":476},{"text":368,"config":642},{"href":370,"dataGaName":371,"dataGaLocation":476},{"text":373,"config":644},{"href":375,"dataGaName":376,"dataGaLocation":476},{"text":646,"config":647},"Status",{"href":648,"dataGaName":649,"dataGaLocation":476},"https://status.gitlab.com/","status",{"text":651,"config":652},"Terms of use",{"href":653,"dataGaName":654,"dataGaLocation":476},"/terms/","terms of use",{"text":656,"config":657},"Privacy statement",{"href":658,"dataGaName":659,"dataGaLocation":476},"/privacy/","privacy statement",{"text":661,"config":662},"Cookie preferences",{"dataGaName":663,"dataGaLocation":476,"id":664,"isOneTrustButton":91},"cookie preferences","ot-sdk-btn",{"items":666},[667,669,671],{"text":651,"config":668},{"href":653,"dataGaName":654,"dataGaLocation":476},{"text":656,"config":670},{"href":658,"dataGaName":659,"dataGaLocation":476},{"text":661,"config":672},{"dataGaName":663,"dataGaLocation":476,"id":664,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":678,"featuredPost":1322,"totalPagesCount":1343,"initialPosts":1344},[679,708,730,753,774,794,817,837,857,877,894,914,936,959,981,1000,1021,1040,1060,1079,1097,1116,1136,1157,1177,1197,1215,1238,1257,1279,1300],{"_path":680,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":681,"content":689,"config":701,"_id":704,"_type":14,"title":705,"_source":16,"_file":706,"_stem":707,"_extension":19},"/en-us/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q",{"title":682,"description":683,"ogTitle":682,"ogDescription":683,"noIndex":6,"ogImage":684,"ogUrl":685,"ogSiteName":686,"ogType":687,"canonicalUrls":685,"schema":688},"Accelerate code reviews with GitLab Duo and Amazon Q","Use AI-powered agents to optimize code reviews by automatically analyzing merge requests and providing comprehensive feedback on bugs, readability, and coding standards.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750096976/Blog/Hero%20Images/Blog/Hero%20Images/Screenshot%202024-11-27%20at%204.55.28%E2%80%AFPM_4VVz6DgGBOvbGY8BUmd068_1750096975734.png","https://about.gitlab.com/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Accelerate code reviews with GitLab Duo and Amazon Q\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-06-02\",\n      }",{"title":682,"description":683,"authors":690,"heroImage":684,"date":692,"body":693,"category":694,"tags":695},[691],"Cesar Saavedra","2025-06-02","Code reviews are critical for catching bugs, improving code readability, and maintaining coding standards, but they can also be a major bottleneck in your workflow. 
When you're trying to ship features quickly, waiting for multiple team members to review your code can be frustrating. The back-and-forth discussions, the scheduling conflicts, and the time it takes to get everyone aligned can stretch what should be a simple review into days or even weeks.\n\nHere's where [GitLab Duo with Amazon Q](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), our new offering that delivers agentic AI throughout the software development lifecycle for AWS customers, comes in to transform your review process. This intelligent, AI-powered solution can perform comprehensive code reviews for you in a fraction of the time it would take your human colleagues. By leveraging advanced agentic AI capabilities, GitLab Duo with Amazon Q streamlines your entire review workflow without sacrificing the quality and thoroughness you need. Think of it as having an always-available, highly skilled reviewer who can instantly analyze your code and provide actionable feedback.\n\n## How it works: Launching a code review\n\nSo how does GitLab Duo with Amazon Q actually work? Let's say you've just finished working on a feature and created a merge request with multiple code updates. Instead of pinging your teammates and waiting for their availability, you simply enter a quick command in the comment section: \"/q review\". That's it – just those two words trigger the AI to spring into action.\n\n![Triggering a code review using GitLab Duo with Amazon Q](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097002/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097002096.png)\n\nOnce you've entered the command, Amazon Q Service immediately begins analyzing your code changes. 
You'll see a confirmation that the review is underway, and within moments, the AI is examining every line of your updates, checking for potential issues across multiple dimensions.\nWhen the review completes, you receive comprehensive feedback that covers all the bases: bug detection, readability improvements, syntax errors, and adherence to your team's coding standards. The AI doesn't just point out problems, it provides context and suggestions for fixing them, making it easy for you to understand what needs attention and why.\n\nThe beauty of this agentic AI approach is that it handles the heavy lifting of code review while you focus on what matters most: building great software. You get the benefits of thorough code reviews — better bug detection, consistent coding standards, and improved code quality — without the time sink. Your deployment times shrink dramatically because you're no longer waiting in review queues, and your entire team becomes more productive.\n\n## Why use GitLab Duo with Amazon Q?\n\nGitLab Duo with Amazon Q transforms your development workflow in the following ways:\n- Lightning-fast code reviews that don't compromise on quality\n- Consistent application of coding standards across your entire codebase\n- Immediate feedback that helps you fix issues before they reach production\n- Reduced deployment times that let you ship features faster\n- More time for your team to focus on creative problem-solving instead of repetitive reviews\n\nReady to see this game-changing feature in action? 
Watch how GitLab Duo with Amazon Q can revolutionize your code review process:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4gFIgyFc02Q?si=GXVz--AIrWiwzf-I\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n> To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).\n> \n> And make sure to join the GitLab 18 virtual launch event to learn about our agentic AI plans and more. [Register today!](https://about.gitlab.com/eighteen/)","ai-ml",[696,496,697,698,699,284,9,700],"AI/ML","code review","product","features","tutorial",{"slug":702,"featured":91,"template":703},"accelerate-code-reviews-with-gitlab-duo-and-amazon-q","BlogPost","content:en-us:blog:accelerate-code-reviews-with-gitlab-duo-and-amazon-q.yml","Accelerate Code Reviews With Gitlab Duo And Amazon Q","en-us/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q.yml","en-us/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q",{"_path":709,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":710,"content":716,"config":724,"_id":726,"_type":14,"title":727,"_source":16,"_file":728,"_stem":729,"_extension":19},"/en-us/blog/amazon-linux-2-service-ready-partner",{"title":711,"description":712,"ogTitle":711,"ogDescription":712,"noIndex":6,"ogImage":713,"ogUrl":714,"ogSiteName":686,"ogType":687,"canonicalUrls":714,"schema":715},"GitLab is now an Amazon Linux 2 Service Ready Partner","Being an Amazon Linux 2 Service Ready partner shows GitLab's strong commitment to AWS linux 
distributions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682451/Blog/Hero%20Images/isis-franca-hsPFuudRg5I-unsplash.jpg","https://about.gitlab.com/blog/amazon-linux-2-service-ready-partner","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is now an Amazon Linux 2 Service Ready Partner\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-09-21\",\n      }",{"title":711,"description":712,"authors":717,"heroImage":713,"date":719,"body":720,"category":721,"tags":722},[718],"Darwin Sanoy","2022-09-21","\n\nSeveral months ago, we shared that GitLab started officially supporting Amazon Linux 2 as well as providing packages for GitLab and GitLab Runner for x86 and Graviton ARM architectures.\n\nGitLab’s hard working Enablement Engineering team has taken this commitment to the next level by acquiring Amazon’s Service Ready Partner designation for Amazon Linux 2.\n\nThe AWS Service Ready program requires that GitLab provide specific evidence in regard to support, compatibility testing and security testing in order to deploy GitLab on Amazon Linux 2 with confidence.\n\nHere is GitLab’s [Amazon Linux 2 Service Ready Partner listing](https://aws.amazon.com/amazon-linux-2/partners/?partner-solutions-cards.sort-by=item.additionalFields.partnerNameLower&partner-solutions-cards.sort-order=asc&awsf.partner-solutions-filter-partner-type=*all&partner-solutions-cards.q=GitLab&partner-solutions-cards.q_operator=AND).\n\n## Amazon Linux 2 support in GitLab 15.0\n\nAmazon Linux 2 is supported in GitLab 15.0 and later. 
An [earlier blog](/blog/amazon-linux-2-support-and-distro-specific-packages/) discusses a variety of important points and provides some code in order to plan a smooth transition.\n\nThe Service Ready Designation has been received for version 15.3, but there were no changes made to the process from 15.0 to support the designation.\n\nGitLab Runner has had ARM64 binaries since 12.6.0 and now has Amazon Linux 2 RPM packages for those wanting package-based installs.\n\n## Inside the distribution team process for distribution support\n\nIt would be easy to think that adding support for additional Linux distros is a simple and easy process - but there is actually a lot of effort that goes into it. GitLab’s Distribution Team uses GitLab itself to apply full DevOps disciplines to the continuous building, testing and distribution of packaging for Amazon Linux. Here are just some of the steps in preparing a GitLab release for packaging:\n\n- Create an elastic scaling distro-specific CI build environment.\n- Create a distro-specific CI test environment.\n- 2380 compatibility tests are performed on the GitLab code base.\n- SAST and dependency security scanning are completed and a specific escalation procedure is applied for any vulnerabilities that are found.\n- Primary distributions such as distro specific .deb and .rpm packages are prepared specifically for each distribution.\n- Secondary distributions are done as well - this is when the official GitLab AMI is created.\n- CI builds and testing generally happen multiple times a week for Amazon Linux.\n\n![Amazon Linux 2 Test Results](https://about.gitlab.com/images/blogimages/2022-09-amazonlinux/al2testsubgroups.png)\n\n![Amazon Linux 2 Test Results](https://about.gitlab.com/images/blogimages/2022-09-amazonlinux/al2tests.png)\n\n## Need-to-know takeaways\n\n- GitLab is now an official Amazon Linux 2 Service Ready Partner.\n- Amazon Linux 2 RPM packages are available for GitLab from version 15.0 and for GitLab Runner.\n\n> 
**Note**\n> This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc.\n\n![AWS Partner Logo](https://about.gitlab.com/images/blogimages/2022-09-amazonlinux/amazonlinuxandgravitonready.png){: .right}\n","engineering",[9,284,723],"DevOps",{"slug":725,"featured":6,"template":703},"amazon-linux-2-service-ready-partner","content:en-us:blog:amazon-linux-2-service-ready-partner.yml","Amazon Linux 2 Service Ready Partner","en-us/blog/amazon-linux-2-service-ready-partner.yml","en-us/blog/amazon-linux-2-service-ready-partner",{"_path":731,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":732,"content":738,"config":747,"_id":749,"_type":14,"title":750,"_source":16,"_file":751,"_stem":752,"_extension":19},"/en-us/blog/amazon-linux-2-support-and-distro-specific-packages",{"title":733,"description":734,"ogTitle":733,"ogDescription":734,"noIndex":6,"ogImage":735,"ogUrl":736,"ogSiteName":686,"ogType":687,"canonicalUrls":736,"schema":737},"Amazon Linux 2 support and distro-specific packages for GitLab","Learn how to do early testing as well as how to peg your automation to the EL 7 packages until you are able to properly integrate the changes into your automation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682299/Blog/Hero%20Images/gitlab-blog-banner.png","https://about.gitlab.com/blog/amazon-linux-2-support-and-distro-specific-packages","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Amazon Linux 2 support and distro-specific 
packages for GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-05-02\",\n      }",{"title":733,"description":734,"authors":739,"heroImage":735,"date":740,"body":741,"category":742,"tags":743},[718],"2022-05-02","GitLab’s Distribution Engineering team has been hard at work getting Amazon\nLinux 2 distro-specific packages ready in preparation for GitLab’s official\nsupport of Amazon Linux 2. Starting with Version 15.0 of GitLab, Amazon\nLinux 2 is a supported distro and packages are available for both x86 and\nGraviton ARM architectures.\n\n\n## What is Amazon Linux 2?\n\n\nAmazon Linux 2 is the next-generation Amazon Linux operating system that\nprovides a modern application environment with the most recent enhancements\nfrom the Linux community alongside long-term support. Amazon Linux 2 is\naccessible as a virtual machine image for on-premises development and\ntesting. This lets you easily develop, test, and certify your applications\nright from your local development environment. \n\n\nAccording to the AWS FAQ page for Amazon Linux 2, the primary elements of\nthis latest version of the operating system include:\n\n\n1. A Linux kernel tuned for performance on Amazon EC2.\n\n\n2. A set of core packages including systemd, GCC 7.3, Glibc 2.26, Binutils\n2.29.1 that receive Long Term Support (LTS) from\n[AWS](/blog/deploy-aws/).\n\n\n3. 
An extras channel for rapidly evolving technologies that are likely to be\nupdated frequently and outside the Long Term Support (LTS) model.\n\n\nAmazon Linux 2 has a support lifespan through June 20, 2024, to allow enough\ntime for users to migrate to Amazon Linux 2022.\n\n\n\n## Safely moving forward to Amazon Linux 2 packages from EL7\n\n\nWhile Amazon Linux 2 has not been officially supported before 15.0, as a\nconvenience to customers who wanted to use yum and RPM packages to install\nthe EL7 packages, GitLab configured a workaround in our packaging services\nto direct Amazon Linux 2 yum requests to the EL7 packages. If you’ve been\nusing GitLab’s yum repo registration script, you may not know that you were\nusing EL7 packages and not distro-specific packages.\n\n\nThis workaround will be deprecated and requests from Amazon Linux 2 will get\nthe distro-specific packages with the release of GitLab 15.3.0 on August 22,\n2022.\n\n\nAs a convenience for those of you who have automation that depends directly\non this workaround, we wanted to provide you with information on how to do\nearly testing as well as how to peg your automation to the EL 7 packages\nuntil you are able to properly integrate the changes into your automation.\n\n\nGitLab documentation demonstrates how to call our managed yum repository\nsetup scripts by downloading the latest copy and running it directly in [the\ninstructions for installing\ninstances](https://about.gitlab.com/install/#centos-7) and [the instructions\nfor installing\nrunners](https://docs.gitlab.com/runner/install/linux-repository.html).\n\n\nAny organization using GitLab’s EL 7 packages for Amazon Linux 2 will want\nto test with - and update to - the distro-specific packages as soon as\npossible as GitLab will only be testing Amazon Linux 2 with the Amazon Linux\n2 specific packages going forward.\n\n\nWe also understand that the timing of the testing and migration to these\npackages must be done in a coordinated cutover so 
that the package type does\nnot change in your existing stacks without you having made any changes. This\ncan be more important if a GitLab stack has undergone platform qualification\nfor compliance purposes.\n\n\nAmazon Linux 2 specific packages are only available for GitLab 14.9.0 and\nlater. If your automation depends directly on GitLab’s repo configuration\nscript and it is still pegged to a GitLab version prior to 14.9.0 when this\nchange becomes GA, then action must be taken to prevent breaking that\nautomation. We have devised an idempotent two-line script solution that you\ncan put in place now to prevent disruption if you are still on a pre-14.9.0\nversion at the time the new behavior of `script.rpm.sh` becomes GA on August\n22, 2022 with the release of GitLab 15.3.0.\n\n\nGitLab rake-based backup and restore will continue to work seamlessly across\nthe distro-specific package changes if you have to restore to your Amazon\nLinux 2 built stack from an EL7 backup. If you are using third-party backup,\nyou may wish to trigger a new backup immediately after transitioning to the\nnew distro packages to avoid the scenario altogether.\n\n\n## Amazon Linux 2 packages for building GitLab instances before 15.3.0\n\n\nThe following code inserts two lines of code between those originally\noutlined in [the instructions for installing using RPM\npackages](/install/#centos-7). The first one (starts with `sed`) splices in\nthe Amazon Linux 2 yum repo endpoint edits into the repository configuration\nfile created by script.rpm.sh. The second one (starts with `if yum`) cleans\nthe yum cache if the package was already installed so that the new location\nwill be used.\n\n\n> Sudo note: If you are using these commands interactively under the default\nSSH or SSM session manager user, then using `sudo su` before running this\ncode is necessary. If you are using these commands in Infrastructure as Code\n(e.g. 
CloudFormation userdata scripts), then sudo may cause ‘command not\nfound’ errors when the user running automation is already root equivalent.\nBe mindful about using interactively tested commands directly in your\nautomation.\n\n\n```bash\n\n#Existing packaging script from https://about.gitlab.com/install/#centos-7\n\ncurl\nhttps://packages.gitlab.com/install/repositories/gitlab/gitlab-ee/script.rpm.sh\n| sudo bash\n\n\n#Patch to preview and/or peg Amazon Linux 2 specific packages\n\nsed -i \"s/\\/el\\/7/\\/amazon\\/2/g\" /etc/yum.repos.d/gitlab_gitlab*.repo\n\n\n#Reset the cache if the package was previously installed (not needed for\ninstalls onto a clean machine)\n\nif yum list installed gitlab-ee; then yum clean all ; yum makecache; fi\n\n\n#Existing install command (remove \"-y\" to validate package and arch mapping\nbefore install)\n\nyum install gitlab-ee -y\n\n```\n\n\n> Notice in this output that the **Version** ends in `.amazon2`. In this\ncase the **Arch** is `aarch64` - indicating 64-bit Graviton ARM.\n\n\n![Resolved GitLab\nDependencies](https://about.gitlab.com/images/blogimages/2022-04-amazon-linux-2/gl-instance-dependencies-resolved.png)\n\n\n### Moving to Amazon Linux 2 packages early for a seamless post-GA\ntransition\n\n\nWhen the script.rpm.sh script is cut over to always point Amazon Linux 2 to\nthe new distro-specific packages, the sed command will no longer be\nnecessary. However, sed is also idempotent and will not make edits if the\nsearch text is not found. 
This means you can use the sed command to switch\nover early, but not have to worry about a breaking change when the\n`script.rpm.sh` is updated.\n\n\n### Pegging EL 7 and/or a GitLab version prior to 14.9.0 for a seamless\npost-GA transition\n\n\nIf your automation is pegged to an earlier version of GitLab, you will need\nto keep using EL7 packages, and, in fact, after the cutover you would need\nto implement the opposite command (which is also idempotent to be\nimplemented now).\n\n\n```bash\n\n#Patch to peg GitLab Version to EL 7 Packages (only does something after GA\nof gitlab repo script)\n\nsed -i \"s/\\/amazon\\/2/\\/el\\/7/g\" /etc/yum.repos.d/gitlab_gitlab*.repo\n\n\n#Reset the cache if the package was previously installed (not needed for\ninstalls onto a clean machine)\n\nif yum list installed gitlab-ee; then yum clean all ; yum makecache; fi\n\n```\n\n\nJust like the sed command for taking distro-specific packages early, this\ncommand can be implemented immediately with no bad effects - which will\nseamlessly keep your automation pegged to the EL 7 packages when\n`script.rpm.sh` is updated.\n\n\n## Amazon Linux 2 package for building GitLab Runners before 15.3.0\n\n\nThe following code inserts two lines of code between those originally\n[outlined in the\ninstructions](https://docs.gitlab.com/runner/install/linux-repository.html).\nThe first one (starts with `sed`) splices in the Amazon Linux 2 yum repo\nendpoint edits into the repository configuration file created by\nscript.rpm.sh. The second one (starts with `if yum`) cleans the yum cache if\nthe package was already installed so that the new location will be used.\n\n\n> Sudo note: If you are using these commands interactively under the default\nSSH or SSM session manager user, then using `sudo su` before running this\ncode is necessary. If you are using these commands in Infrastructure as Code\n(e.g. 
CloudFormation userdata scripts), then sudo may cause ‘command not\nfound’ errors when the user running automation is already root equivalent.\nBe mindful about using interactively tested commands directly in your\nautomation.\n\n\n```bash\n\n#Existing packaging script from\nhttps://docs.gitlab.com/runner/install/linux-repository.html\n\ncurl -L\n\"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.rpm.sh\"\n| sudo bash\n\n\n#Patch to test or peg Amazon Linux 2 specific packages\n\nsed -i \"s/\\/el\\/7/\\/amazon\\/2/g\" /etc/yum.repos.d/runner_gitlab*.repo\n\n\n#Reset the cache if the package was previously installed (not needed for\ninstalls onto a clean machine)\n\nif yum list installed gitlab-runner; then yum clean all ; yum makecache; fi\n\n\n#Existing install command (remove \"-y\" to validate package and arch mapping\nbefore install)\n\nyum install gitlab-runner -y\n\n```\n\n\n> Notice in this output that **Version** is not distro-specific. In this\ncase the **Arch** is `aarch64` - indicating 64-bit Graviton ARM.\n\n\n![Resolved GitLab Runner\nDependencies](https://about.gitlab.com/images/blogimages/2022-04-amazon-linux-2/gl-runner-dependencies-resolved.png)\n\n\n## Pegging to EL 7 and/or a GitLab Runner version prior to 14.9.1 for a\nseamless post-GA transition\n\n\nThe underlying package for EL 7 and Amazon Linux 2 is literally a copy of\nthe same package. 
However, the Amazon Linux 2 endpoint for Runner RPM\npackages have only been uploaded from GitLab Runner 14.9.1 and later, so if\nyou have runners that need to be on an earlier version, you would need to\nstay pointed at EL 7 for those packages to continue to resolve as available.\nThe following code shows how to do that for GitLab Runner.\n\n\n```bash\n\n#Patch to peg GitLab Version to EL 7 Packages (only does something after GA\nof gitlab repo script)\n\nsed -i \"s/\\/amazon\\/2/\\/el\\/7/g\" /etc/yum.repos.d/runner_gitlab*.repo\n\n\n#Reset the cache if the package was previously installed (not needed for\ninstalls onto a clean machine)\n\nif yum list installed gitlab-runner; then yum clean all ; yum makecache; fi\n\n```\n\n\n## Need-to-know takeaways\n\n\n- Amazon Linux 2 is a supported distro for GitLab instances and runner as of\nthe release of version 15.0 on May 22, 2022.\n\n- Amazon Linux 2 packages are available for x86 and ARM for GitLab Version\n14.9.0 and higher. (Prior to 14.9.0 the EL7 packages must be used as they\nhave a long version history).\n\n- This is the first availability of ARM RPM packages of GitLab for Amazon\nLinux 2.\n\n- In 15.3 (August 22, 2022), the script.rpm.sh will automatically start\ndirecting to the Amazon Linux 2 packages where it had previously directed\nAmazon Linux 2 yum requests to the EL7 packages.\n\n- It is common to have taken a dependency directly on the latest version of\nthis GitLab script in other automation.\n\n- Before the GA cutover date of August 22, 2022 (15.3.0 GitLab Release), for\nthese scripts, you have the opportunity to pre-test these packages and\ndetermine whether they create any issues with your automation or GitLab\nconfiguration.\n\n- You can also peg to the Amazon Linux 2 packages early or peg to the EL7\npackages in advance if you find problems that you need more time to resolve.\nBoth of these pegging types are idempotent, meaning the code changes do not\ndo anything that causes problems after the 
change over happens.\n\n- Existing Amazon Linux 2 installations that were installed using the EL7\npackages can use a regular yum upgrade command to start using the new Amazon\nLinux 2 packages. This operation may also be an upgrade of the product\nversion at the same time. For existing installations you will need to patch\nthe yum repo files as explained in this article in order to upgrade directly\nto Amazon Linux 2 from EL7 using packages. \n\n\n> **Note**\n\n> This blog post and linked pages contain information related to upcoming\nproducts, features, and functionality. It is important to note that the\ninformation presented is for informational purposes only. Please do not rely\non this information for purchasing or planning purposes. As with all\nprojects, the items mentioned in this blog post and linked pages are subject\nto change or delay. The development, release, and timing of any products,\nfeatures, or functionality remain at the sole discretion of GitLab Inc.\n\n\n![AWS Partner\nLogo](https://about.gitlab.com/images/blogimages/2022-04-amazon-linux-2/awsgravitonready.png){:\n.right}\n","news",[744,745,746,700,9],"releases","CI","CD",{"slug":748,"featured":6,"template":703},"amazon-linux-2-support-and-distro-specific-packages","content:en-us:blog:amazon-linux-2-support-and-distro-specific-packages.yml","Amazon Linux 2 Support And Distro Specific Packages","en-us/blog/amazon-linux-2-support-and-distro-specific-packages.yml","en-us/blog/amazon-linux-2-support-and-distro-specific-packages",{"_path":754,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":755,"content":761,"config":768,"_id":770,"_type":14,"title":771,"_source":16,"_file":772,"_stem":773,"_extension":19},"/en-us/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab",{"title":756,"description":757,"ogTitle":756,"ogDescription":757,"noIndex":6,"ogImage":758,"ogUrl":759,"ogSiteName":686,"ogType":687,"canonicalUrls":759,"schema":760},"Automating container image migration from 
Amazon ECR to GitLab","When platform teams move their CI/CD to GitLab, migrating container images shouldn't be the bottleneck. Follow this step-by-step guide to automate the pipeline migration process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663129/Blog/Hero%20Images/blog-image-template-1800x945__28_.png","https://about.gitlab.com/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automating container image migration from Amazon ECR to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2025-02-13\",\n      }",{"title":756,"description":757,"authors":762,"heroImage":758,"date":764,"body":765,"category":721,"tags":766},[763],"Tim Rizzi","2025-02-13","\"We need to migrate hundreds of container images from Amazon Elastic\nContainer Registry (ECR) to GitLab. Can you help?\" This question kept coming\nup in conversations with platform engineers. They were modernizing their\nDevSecOps toolchain with GitLab but got stuck when faced with moving their\ncontainer images. While each image transfer is simple, the sheer volume made\nit daunting.\n\n\nOne platform engineer perfectly said, \"I know exactly what needs to be done\n– pull, retag, push. But I have 200 microservices, each with multiple tags.\nI can't justify spending weeks on this migration when I have critical\ninfrastructure work.\"\n\n\n## The challenge\n\n\nThat conversation sparked an idea. What if we could automate the entire\nprocess? When platform teams move their\n[CI/CD](https://about.gitlab.com/topics/ci-cd/) to GitLab, migrating\ncontainer images shouldn't be the bottleneck. The manual process is\nstraightforward but repetitive – pull each image, retag it, and push it to\nGitLab's Container Registry. 
Multiply this by dozens of repositories and\nmultiple tags per image, and you're looking at days or weeks of tedious\nwork.\n\n\n## The solution\n\n\nWe set out to create a GitLab pipeline that would automatically do all this\nheavy lifting. The goal was simple: Give platform engineers a tool they\ncould set up in minutes and let run overnight, waking up to find all their\nimages migrated successfully.\n\n\n### Setting up access\n\n\nFirst things first – security. We wanted to ensure teams could run this\nmigration with minimal AWS permissions. Here's the read-only identity and\naccess management (IAM) policy you'll need:\n\n\n```json\n\n{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                \"ecr:GetAuthorizationToken\",\n                \"ecr:BatchCheckLayerAvailability\",\n                \"ecr:GetDownloadUrlForLayer\",\n                \"ecr:DescribeRepositories\",\n                \"ecr:ListImages\",\n                \"ecr:DescribeImages\",\n                \"ecr:BatchGetImage\"\n            ],\n            \"Resource\": \"*\"\n        }\n    ]\n}\n\n```\n\n\n### GitLab configuration\n\n\nWith security handled, the next step is setting up GitLab. We kept this\nminimal - you'll need to configure these variables in your CI/CD settings:\n\n\n```\n\nAWS_ACCOUNT_ID: Your AWS account number\n\nAWS_DEFAULT_REGION: Your ECR region\n\nAWS_ACCESS_KEY_ID: [Masked]\n\nAWS_SECRET_ACCESS_KEY: [Masked]\n\nBULK_MIGRATE: true\n\n```\n\n\n### The migration pipeline\n\n\nNow for the interesting part. 
We built the pipeline using Docker-in-Docker\nto handle all the image operations reliably:\n\n\n```yaml\n\nimage: docker:20.10\n\nservices:\n  - docker:20.10-dind\n\nbefore_script:\n  - apk add --no-cache aws-cli jq\n  - aws sts get-caller-identity\n  - aws ecr get-login-password | docker login --username AWS --password-stdin\n  - docker login -u ${CI_REGISTRY_USER} -p ${CI_REGISTRY_PASSWORD} ${CI_REGISTRY}\n```\n\n\nThe pipeline works in three phases, each building on the last:\n\n\n1. Discovery\n\n\nFirst, it finds all your repositories:\n\n\n```bash\n\nREPOS=$(aws ecr describe-repositories --query\n'repositories[*].repositoryName' --output text)\n\n```\n\n\n2. Tag enumeration\n\n\nThen, for each repository, it gets all the tags:\n\n\n```bash\n\nTAGS=$(aws ecr describe-images --repository-name $repo --query\n'imageDetails[*].imageTags[]' --output text)\n\n```\n\n\n3. Transfer\n\n\nFinally, it handles the actual migration:\n\n\n```bash\n\ndocker pull\n${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/${repo}:${tag}\n\ndocker tag\n${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/${repo}:${tag}\n${CI_REGISTRY_IMAGE}/${repo}:${tag}\n\ndocker push ${CI_REGISTRY_IMAGE}/${repo}:${tag}\n\n```\n\n\n## What you get\n\n\nRemember that platform engineer who didn't want to spend weeks on migration?\nHere's what this solution delivers:\n\n\n- automated discovery and migration of all repositories and tags\n\n- consistent image naming between ECR and GitLab\n\n- error handling for failed transfers\n\n- clear logging for tracking progress\n\n\nInstead of writing scripts and babysitting the migration, the platform\nengineer could focus on more valuable work.\n\n\n## Usage\n\n\nGetting started is straightforward:\n\n\n1. Copy the `.gitlab-ci.yml` to your repository.\n\n2. Configure the AWS and GitLab variables.\n\n3. 
Set `BULK_MIGRATE` to \"true\" to start the migration.\n\n\n## Best practices\n\n\nThrough helping teams with their migrations, we've learned a few things:\n\n\n- Run during off-peak hours to minimize the impact on your team.\n\n- Keep an eye on the pipeline logs - they'll tell you if anything needs\nattention.\n\n- Don't decommission ECR until you've verified all images transferred\nsuccessfully.\n\n- For very large migrations, consider adding rate limiting to avoid\noverwhelming your network\n\n\nWe've open-sourced this pipeline in our public GitLab repository because we\nbelieve platform engineers should spend time building valuable\ninfrastructure, not copying container images. Feel free to adapt it for your\nneeds or ask questions about implementation.\n\n\n> #### Get started with this and other package components with our [CI/CD\nCatalog\ndocumentation](https://gitlab.com/explore/catalog/components/package).\n",[109,9,700,496,698,767],"solutions architecture",{"slug":769,"featured":91,"template":703},"automating-container-image-migration-from-amazon-ecr-to-gitlab","content:en-us:blog:automating-container-image-migration-from-amazon-ecr-to-gitlab.yml","Automating Container Image Migration From Amazon Ecr To Gitlab","en-us/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab.yml","en-us/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab",{"_path":775,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":776,"content":782,"config":788,"_id":790,"_type":14,"title":791,"_source":16,"_file":792,"_stem":793,"_extension":19},"/en-us/blog/aws-devsecops-competency-partner",{"title":777,"description":778,"ogTitle":777,"ogDescription":778,"noIndex":6,"ogImage":779,"ogUrl":780,"ogSiteName":686,"ogType":687,"canonicalUrls":780,"schema":781},"GitLab achieves the AWS DevSecOps Partner Competency Specialty","The AWS DevSecOps Partner Competency Specialty demonstrates that GitLab is instrumental in helping customers implement better security 
while continuing to innovate.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668799/Blog/Hero%20Images/securitylifecycle.png","https://about.gitlab.com/blog/aws-devsecops-competency-partner","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab achieves the AWS DevSecOps Partner Competency Specialty\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2023-09-25\"\n      }",{"title":777,"description":778,"authors":783,"heroImage":779,"date":784,"body":785,"category":786,"tags":787},[718],"2023-09-25","\nGitLab recently achieved AWS's DevSecOps Partner Competency designation, a sub-specialty for the [AWS DevOps ISV Partner Competency](https://partners.amazonaws.com/partners/001E0000018YWFfIAO/GitLab,%20Inc) category. GitLab also holds the AWS DevOps ISV Partner Competency designation. AWS's partner qualification program signifies to customers that AWS has vetted GitLab's capabilities and use cases.\n\n> Attending [AWS re:Invent 2023](https://reinvent.awsevents.com/)? 
Find us at Booth 1152.\n\nAccording to AWS, solutions in the [DevSecOps category](https://aws.amazon.com/devops/partner-solutions/?blog-posts-cards.sort-by=item.additionalFields.createdDate&blog-posts-cards.sort-order=desc&partner-case-studies-cards.sort-by=item.additionalFields.sortDate&partner-case-studies-cards.sort-order=desc) \"make it easy for customers to integrate security across every stage of the development and delivery cycles, providing rapid and contextual feedback to development, security, and ops teams.\" The designation comprises a [validation checklist](https://apn-checklists.s3.amazonaws.com/competency/devops/technology/CenAm4qx8.html#competencyCategories) and attestation that GitLab's DevSecOps Platform meets AWS’s expectations.\n\n## GitLab's strength in DevSecOps\nGitLab's [AI-powered DevSecOps platform](https://about.gitlab.com/gitlab-duo/) helps organizations shift left on vulnerability remediation. At GitLab, shifting left means ensuring developers have a frictionless security defect remediation experience that enables them to immediately handle vulnerabilities in their code.\n\nGitLab's DevSecOps Platform:\n- surfaces security findings shortly after they are introduced and while the code is still being worked on\n- associates findings directly with those who changed the code\n- offers remediation guidance (including on-demand training and automated fixes)\n- supports rich, in-context collaboration for vulnerability management\n\n![GitLab + AWS Workflow](https://about.gitlab.com/images/blogimages/aws/devsecops-post/gitlabawsworkflow.png)\n\n\n![AWS Partner Logo](https://about.gitlab.com/images/blogimages/aws/devopsisvpartner.png){: .right}\n","devsecops",[9,284,723],{"slug":789,"featured":6,"template":703},"aws-devsecops-competency-partner","content:en-us:blog:aws-devsecops-competency-partner.yml","Aws Devsecops Competency 
Partner","en-us/blog/aws-devsecops-competency-partner.yml","en-us/blog/aws-devsecops-competency-partner",{"_path":795,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":796,"content":802,"config":811,"_id":813,"_type":14,"title":814,"_source":16,"_file":815,"_stem":816,"_extension":19},"/en-us/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated",{"title":797,"description":798,"ogTitle":797,"ogDescription":798,"noIndex":6,"ogImage":799,"ogUrl":800,"ogSiteName":686,"ogType":687,"canonicalUrls":800,"schema":801},"Building GitLab with GitLab: How GitLab.com inspired Dedicated","Learn how the multi-tenancy SaaS solution, GitLab.com, influenced the design of the single-tenancy SaaS, GitLab Dedicated.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659740/Blog/Hero%20Images/building-gitlab-with-gitlab-no-type.png","https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: How GitLab.com inspired Dedicated\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Newdigate\"},{\"@type\":\"Person\",\"name\":\"Craig Miskell\"},{\"@type\":\"Person\",\"name\":\"John Coghlan\"}],\n        \"datePublished\": \"2023-08-03\",\n      }",{"title":797,"description":798,"authors":803,"heroImage":799,"date":807,"body":808,"category":721,"tags":809},[804,805,806],"Andrew Newdigate","Craig Miskell","John Coghlan","2023-08-03","\nEarlier this year, we announced [the general availability of GitLab Dedicated](https://about.gitlab.com/blog/gitlab-dedicated-available/), our single-tenancy software-as-a-service (SaaS) offering. Dedicated, which addresses the needs of customers with stringent compliance requirements while maintaining speed, efficiency, and security, was developed from the lessons we learned building and using GitLab.com, our multi-tenancy model. 
Although there is overlap in how we manage both platforms, such as the same service-level monitoring stack, there were significant considerations that sparked the need for new design decisions, including how we approach automation, databases, monitoring, and availability. In this blog, we share some of those decision points and their outcomes.\n\n## GitLab platform options\nBefore we dive into the evolution of GitLab Dedicated, let’s level-set on GitLab’s [portfolio of platform models](https://docs.gitlab.com/ee/subscriptions/choosing_subscription.html#choose-a-subscription):\n- GitLab.com, a.k.a. multi-tenant GitLab SaaS on our pricing page and in our documentation\n- GitLab Dedicated, single-tenant SaaS that satisfies compliance requirements such as data residency, isolation, and private networking\n- GitLab self-managed, in which customers install, administer, and maintain their own GitLab instance\n\nEach method meets the different needs of our wide range of customers and requires a unique approach for how we create, package, and deploy the application.\n\nWhile both GitLab.com and Dedicated are SaaS-based, there are key differences between the two. The multi-tenant GitLab.com is the largest hosted instance of GitLab and services thousands of customers and millions of users. Because the platform's reliability is critical to so many customers and because of the iterative nature of how GitLab.com was built, decisions have been made along the way that are unique to the scale of this specific instance.\n\nIn contrast, GitLab Dedicated is a single-tenant SaaS application that is hosted by GitLab in the customer's region of choice (GitLab.com is hosted in the U.S.). 
While still providing a GitLab-managed SaaS solution for our customers, Dedicated instances are fully isolated from one another, running on a platform that automates the configuration and provisioning of the instances, along with automating as many of the day-two operations as possible, such as maintenance, monitoring, and optimization.\n\nHere are some examples of how Dedicated has used the blueprint of GitLab.com.\n\n## Improved automated deployments\nGitLab.com is a permanent installation with a great deal of history, having evolved significantly since it was first developed. Originally, it was deployed on a single instance in Amazon AWS, before migrating to Microsoft Azure, where it continued to scale out. From Azure, it migrated to its current cloud, Google Cloud Platform. Since then, many customer workloads have [migrated into Kubernetes](https://about.gitlab.com/blog/year-of-kubernetes/) and are supported by the Google Kubernetes Engine ([GKE](https://cloud.google.com/kubernetes-engine)).\n\nWith GitLab Dedicated, we're building smaller instances that rely on automation, repeatability, and deterministic environments. All customer tenant GitLab instance operations must be 100% automated, including provisioning, upgrades, scaling, configuration changes, and any other routine operations. The stack relies heavily on the GitLab Environment Toolkit ([GET](https://gitlab.com/gitlab-org/gitlab-environment-toolkit/-/blob/main/docs/environment_advanced_hybrid.md)) Cloud Native Hybrid, which uses the GitLab Helm charts for stateless workloads (e.g., Rails) and Omnibus for deployments to VMs (e.g., Gitaly). 
GET helps with the deployments targeting [reference architectures](https://docs.gitlab.com/ee/administration/reference_architectures/) and coordinating the provisioning of cloud resources, including compute instances, Kubernetes clusters, managed Postgres databases and more.\n\nAs much as GET automates, it has a certain amount of required setup, which is acceptable to perform manually for one-off or otherwise long-lived deployments, but in order to scale Dedicated we also had to automate that process, which we did with Terraform. Because this was a greenfield approach, we were able to be particularly careful with privileges. Our current cloud deployment target is AWS, so we developed a detailed identity and access management ([IAM](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)) policy to grant each stage of deployment only the strictly necessary access. We also use IAM role assumption from trusted workloads in a central AWS account to eliminate the need for explicit credentials.  \n\nDeployments follow this process in order:\n- An account creation job running from a trusted location creates a fresh AWS account in an [AWS Organization](https://docs.aws.amazon.com/organizations/index.html), placing it in the correct Organizational Unit to automatically have a [CloudFormation StackSet](https://docs.aws.amazon.com/organizations/latest/userguide/services-that-can-integrate-cloudformation.html) applied, with ongoing updates handled by AWS when needed. This allows us to operate the entire lifecycle of the tenant account using IAM Role Assumption rather than generating and storing static IAM credentials.\n- Prepare stage sets up a fresh AWS account ready to receive a deployment; the privileges are quite high powered, but still limited to the necessary areas, including creating the next role.  
\n- Onboard stage creates some high-level resources and otherwise does the setup that GET requires to be able to run, including creating the roles for the next stages with their own limited privileges.  \n- Provision stage is mostly about running GET Terraform and creating the compute and storage resources onto which GitLab will be deployed, with a few additions for our specific needs.  \n- Configure stage runs to deploy the GitLab application onto the resources created earlier. At its core, this is the GET Ansible stage, but it includes our own Terraform wrapper as well to handle our specific needs.\n\nOnce these stages complete, a fully deployed GitLab instance is ready to go.  \n\nConfiguration changes and GitLab upgrades execute the same set of stages, ensuring everything is still configured correctly and applying any pending changes. In the early days of GitLab Dedicated this was done in GitLab CI/CD pipelines operating on GitLab.com, with the tenant descriptions as JSON files in a repository, which was an effective and simple place to start.  \n\nHowever, this multi-stage deployment is now managed by [Switchboard](https://about.gitlab.com/direction/saas-platforms/switchboard/), a portal we built specifically for GitLab Dedicated. Switchboard is a bespoke Rails application, which will be the single source of truth for configuration, accessible by customers to manage customer-facing settings, as well as GitLab Dedicated staff for general management. Switchboard will be responsible for automating regular upgrades, including gradual rollouts across the fleet of Dedicated instances.\n\n## Databases geared towards the needs of single tenancy\nGitLab.com uses self-managed Postgres and Redis. For GitLab Dedicated, we wanted to leverage AWS’s managed services as much as possible. Examples include RDS, Elasticache, and OpenSearch, the AWS Elasticsearch managed service. 
Some of these services may not always be able to support GitLab.com-scale platforms, but they handle the traffic of a single-tenant instance well and provide reliable failovers and ongoing maintenance with no effort on our part.\n\n## Monitoring aligned with strict compliance needs\nThe observability stack for GitLab Dedicated relies on the expertise we gained from building GitLab.com. The monitoring, logging, and availability infrastructure is all maintained within the customer's AWS account, nothing is shared. We receive low-context alerts from these private systems. They serve as a mechanism to direct us to the customer account so we can review what is going on and triage the underlying issues if needed. This is helpful with regulators and compliance as nothing can leak because it doesn't leave the system.\n\nWhile Dedicated and GitLab.com share much of the same monitoring stack, Dedicated instances have tended to reveal different issues within our application. This is due to GitLab.com being a multi-tenant instance, while GitLab Dedicated instances are single-tenant. \n\nThink of the adage, \"[Your 9s are not my 9s](https://rachelbythebay.com/w/2019/07/15/giant/).\" In a platform at the scale of GitLab.com, a subset of users who encounter an issue in part of the application may be a very small percentage of the overall user base. The small impact relative to the scale of the platform may not create an alert. In a single-tenant instance, however, the same bugs or scaling issues can quickly impact a higher percentage of the overall users of the instance, escalating the issue's importance. Applying our service-level monitoring to single-tenant GitLab instances has benefited GitLab users who had encountered bugs that were overlooked in the volume of GitLab.com usage. 
When we identify issues in a Dedicated instance, we resolve them within the product.\n\n## High availability for all components\nConsidering the hybrid environment and the level of service that we want to offer to our customers, we have made some minor changes from the [standard reference architecture](https://docs.gitlab.com/ee/administration/reference_architectures/).\n\nOne such change is introducing high availability for all components. For the lower size (i.e., up to 2,000 users), our architecture ships by default with all the components in full redundant mode. Components like RDS and Elasticache will have a replica in a different Availability Zone. This is referred to as the primary region and we have to define how it will look in the [Geo replicas](https://docs.gitlab.com/ee/administration/geo/setup/database.html).\n\n## Only on Dedicated\nIn addition to the other changes we made, we also built some features that are only used for GitLab Dedicated:\n- Bring your own key - customers can provide and manage the encryption keys used to encrypt AWS resources such as storage, allowing a customer to revoke access should that ever become necessary. This is not something that can be offered in a multi-tenant system like GitLab.com.\n- Switchboard - as mentioned above, Switchboard was purpose-built for Dedicated. It is a multi-tenant Ruby on Rails application, accessible by GitLab Dedicated customer administrators and GitLab Dedicated team members. Using this interface, customers can change the available application runtime settings, access provided graphs, add additional products, and more. The main Switchboard instance serves as a single source of truth for global configuration and status across multiple cloud providers and regions.\n- PrivateLink networking - allows traffic between tenant AWS accounts and customer accounts without exposing data to the internet. 
\n- Other network features - including traffic filtering and private hosted zones.\n\nDedicated has been an exciting project and a great learning experience for our team. We were able to apply the knowledge accumulated in building GitLab.com to deliver an important new product for our customers in a very efficient way. You can learn more about GitLab Dedicated by visiting our [Dedicated page](https://about.gitlab.com/dedicated/) or contacting a GitLab sales representative.\n\n_Check out the [first installment in our \"Building GitLab with GitLab\" series](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/), which takes you behind the scenes of the development of our web API fuzz testing._\n",[9,496,810,699],"DevSecOps",{"slug":812,"featured":6,"template":703},"building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated","content:en-us:blog:building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated.yml","Building Gitlab With Gitlabcom How Gitlab Inspired Dedicated","en-us/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated.yml","en-us/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated",{"_path":818,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":819,"content":825,"config":831,"_id":833,"_type":14,"title":834,"_source":16,"_file":835,"_stem":836,"_extension":19},"/en-us/blog/dag-manual-fix",{"title":820,"description":821,"ogTitle":820,"ogDescription":821,"noIndex":6,"ogImage":822,"ogUrl":823,"ogSiteName":686,"ogType":687,"canonicalUrls":823,"schema":824},"How to use manual jobs with `needs:` relationships","Are you using manual jobs and needs relationship in your CI/CD pipeline? 
Learn more about the fix that might cause your pipeline to behave differently.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683170/Blog/Hero%20Images/blog_cover2.png","https://about.gitlab.com/blog/dag-manual-fix","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use manual jobs with `needs:` relationships\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2021-05-20\",\n      }",{"title":820,"description":821,"authors":826,"heroImage":822,"date":828,"body":829,"category":721,"tags":830},[827],"Dov Hershkovitch","2021-05-20","## A bug when job `needs` a manual job\n\n\nIn [13.12 we fixed a\nbug](https://gitlab.com/gitlab-org/gitlab/-/issues/31264) that might affect\nthe existing behavior of your pipeline. We explain why we had to fix the\nbug, the possible impact of this change on your pipeline, and the proposed\nworkaround if you would like to revert this behavior.\n\n\n## Background on a two-job pipeline\n\n\nIn GitLab CI/CD you can easily configure a job to require manual\nintervention before it runs. 
The job gets added to the pipeline, but doesn't\nrun until you click the **play** button on it.\n\n\nLet's look at a two-job pipeline:\n\n\n```yaml\n\nstages:\n  - stage1\n  - stage2\n\njob1:\n  stage: stage1\n  script:\n    - echo \"this is an automatic job\"\n\nmanual_job:\n  stage: stage2\n  script:\n    - echo \"This is a manual job which doesn't start automatically, and the pipeline can complete without it starting.\"\n  when: manual # This setting turns a job into a manual one\n```\n\n\nThis is how it looks when we look at the pipeline graph:\n\n\n![image2](https://about.gitlab.com/images/blogimages/11-05-2021-when-job-needs-manual/blog1.png){:\n.shadow.medium.center.wrap-text}\n\n\nNotice that the manual job gets skipped, and the pipeline completes\nsuccessfully even though the manual job did not get triggered. This happens\nbecause manual jobs are considered optional, and do not need to run.\n\n\nInternally, manual jobs have `allow_failure` set to true by default, which\nmeans that these skipped manual jobs do not cause a pipeline failure. The\nYAML code below demonstrates how to write the manual job, which results in\nthe same behavior. The job doesn't automatically start, is skipped, and the\npipeline passes.\n\n\n```yaml\n\nmanual_job:\n  stage: stage2\n  script:\n    - echo \"This is a manual job which doesn't start automatically, and the pipeline can complete without it starting.\"\n  when: manual\n  allow_failure: true # this line is redundant since manual job has this setting by default\n```\n\n\nYou can set `allow_failure` to true for any job, including both manual and\nautomatic jobs, and then the pipeline does not care if the job runs\nsuccessfully or not.\n\n\n### How to expand the configuration with `needs` (DAG)\n\n  Last year we introduced the [`needs` keyword which lets you create a Directed Acyclic Graphs (DAG) to speed up your pipeline](https://docs.gitlab.com/ee/ci/yaml/#needs). 
The `needs` keyword creates a dependency between two jobs regardless of their stage.\n\nLet's look at this example:\n\n\n```yaml\n\nstages:\n  - stage1\n  ....\n  - stage10\n\njob1: # this is the first job that runs in the pipeline\n  stage: stage1\n  script:\n    - echo \"exit 0\"\n.....\n\n\njob10:\n  needs:  # Defined a \"needs\" relationship with job1\n    - job1\n  stage: stage10\n  script:\n    - echo \"This job runs as soon as job1 completes, even though this job is in stage10.\"\n```\n\n\nThe `needs` keyword creates a dependency between the two jobs, so `job10`\nruns as soon as `job1` **finishes running** successfully, regardless of the\nstage ordering.\n\n\nSo what happens if a job `needs` a manual job, that doesn't start running\nautomatically?\n\n\nLet's look at the following example:\n\n\n```yaml\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script: exit 0\n\ntest:\n  stage: test\n  when: manual\n  script: exit 0\n\ndeploy:\n  stage: deploy\n  script: echo \"when should this job run?\"\n  needs:\n    - test\n```\n\n\nBefore 13.12, this type of configuration would cause the pipeline to get\nstuck. The `deploy` job can only start when the `test` job completes, but\nthe `test` job does not start automatically. 
The rest of the pipeline stops\nand waits for someone to run the manual `test` job.\n\n\n![image3](https://about.gitlab.com/images/blogimages/11-05-2021-when-job-needs-manual/blog2.png){:\n.shadow.medium.center.wrap-text}\n\n\nThis behavior is even worse with larger pipelines:\n\n\n![image4](https://about.gitlab.com/images/blogimages/11-05-2021-when-job-needs-manual/blog3.png){:\n.shadow.medium.center.wrap-text}\n\n\nThe example above shows there is a needs relationship between `post test`\njob and the `test` job (which is a manual job) as you can see the pipeline\nis stuck in a running state and any subsequent jobs will not run.\n\n\nThis was not the behavior most users expected, so we improved it in 13.12.\nNow, if there is a `needs` relationship pointing to a manual job, the\npipeline doesn't stop by default anymore. The manual job is considered\noptional by default in all cases now. Any jobs that have a `needs`\nrelationship to manual jobs are now also considered optional and skipped if\nthe manual job isn't triggered. If you start the manual job, the jobs that\nneed it can start after it completes.\n\n\nNote that if you start the manual job before a later job that has it in a\n`needs` configuration, the later job will still wait for the manual job to\nfinishes running.\n\n\n## What if I don't want this new behavior?\n\n\nOne of the reasons we selected this solution is that you can quickly revert\nthis change. If you made use of this inadvertent behavior and configured\nyour pipelines to use it to block on manual jobs, it's easy to return to\nthat previous behavior. All you have to do is override the default\n`allow_failure` in the manual job with `allow_failure: false`. 
This way the\nmanual job is no longer optional, and the pipeline status will be marked as\nblocked and wait for you to run the job manually.\n\n\n```yaml\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script: exit 0\n\ntest:\n  stage: test\n  when: manual\n  allow_failure: false  # Set to false to return to the previous behavior.\n  script: exit 0\n\ndeploy:\n  stage: deploy\n  script: exit 0\n  needs:\n    - test\n```\n\n\nShare any thoughts, comments, or questions, by opening an issue in GitLab\nand mentioning me (`@dhershkovitch`).\n",[745,9,723],{"slug":832,"featured":6,"template":703},"dag-manual-fix","content:en-us:blog:dag-manual-fix.yml","Dag Manual Fix","en-us/blog/dag-manual-fix.yml","en-us/blog/dag-manual-fix",{"_path":838,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":839,"content":845,"config":851,"_id":853,"_type":14,"title":854,"_source":16,"_file":855,"_stem":856,"_extension":19},"/en-us/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws",{"title":840,"description":841,"ogTitle":840,"ogDescription":841,"noIndex":6,"ogImage":842,"ogUrl":843,"ogSiteName":686,"ogType":687,"canonicalUrls":843,"schema":844},"DevSecOps + Agentic AI: Now on GitLab Self-Managed Ultimate on AWS","Start using AI-powered, DevSecOps-enhanced agents in your AWS GitLab Self-Managed Ultimate instance. 
Enjoy the benefits of GitLab Duo and Amazon Q in your organization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659604/Blog/Hero%20Images/Screenshot_2024-11-27_at_4.55.28_PM.png","https://about.gitlab.com/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps + Agentic AI: Now on GitLab Self-Managed Ultimate on AWS\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jackie Porter\"}],\n        \"datePublished\": \"2025-01-16\",\n      }",{"title":840,"description":841,"authors":846,"heroImage":842,"date":848,"body":849,"category":694,"tags":850},[847],"Jackie Porter","2025-01-16","We are thrilled to announce the GitLab Duo with Amazon Q offering, previously [shared at AWS 2024 re:Invent](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai/), is now available in Preview (Beta) for GitLab Self-Managed Ultimate users, at no additional cost. This milestone brings AI agentic experiences to organizations that maintain their own GitLab instance.\n\n### What does this mean for GitLab Self-Managed Ultimate customers? \n\nBeginning in our [17.8 release](https://about.gitlab.com/releases/2025/01/16/gitlab-17-8-released/), GitLab Self-Managed Ultimate customers can now take advantage of the GitLab Duo with Amazon Q Preview (Beta) [capabilities](https://docs.gitlab.com/ee/user/duo_amazon_q/). 
There are three key experiences you will be able to access: \n- AI-powered feature development: Use the `/q dev` quick action to transform requirements into merge-ready code.\n- Automated code reviews: Leverage `/q review` for instant, intelligent feedback on code quality and security.\n- Java modernization: Streamline Java application upgrades with `/q transform`.\n\n### Getting started with the Preview (Beta) \n\nTo use these capabilities in your GitLab Self-Managed Ultimate instance:\n\n- Ensure you meet the [prerequisites](https://docs.gitlab.com/ee/user/duo_amazon_q/setup.html#prerequisites), including upgrading to GitLab 17.8, have an Ultimate subscription (no trial access), and have the instance hosted on AWS. \n- Enable your GitLab Duo with Amazon Q integration settings.\n- Configure IAM identity and roles in AWS and the GitLab AI gateway.\n- Add the Amazon Q user to the project. \n\nFor more detailed setup information, see our [documentation](https://docs.gitlab.com/ee/user/duo_amazon_q/setup.html). \n\n### Looking ahead\n\nThis Preview release represents our commitment to bringing enterprise-grade AI capabilities to all GitLab Ultimate customers. We're excited to work closely with our customers during this Preview (Beta) period to ensure GitLab Duo with Amazon Q delivers a superior experience. \nWe encourage GitLab Self-Managed Ultimate customers to begin exploring these capabilities and provide feedback. Your input will be invaluable in shaping the future of AI-powered development in GitLab.\n\n### Get started today \n\nGitLab Self-Managed Ultimate customers can begin enabling and configuring GitLab Duo with Amazon Q as outlined in our [setup documentation](https://docs.gitlab.com/ee/user/duo_amazon_q/setup.html). To learn more about how the Preview (Beta) release can transform your software development, visit our [website](https://about.gitlab.com/partners/technology-partners/aws/#interest). 
\nStay tuned for regular updates as we continue to enhance and expand the capabilities of GitLab Duo with Amazon Q.\n",[696,496,698,9,742],{"slug":852,"featured":6,"template":703},"devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws","content:en-us:blog:devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws.yml","Devsecops Agentic Ai Now On Gitlab Self Managed Ultimate On Aws","en-us/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws.yml","en-us/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws",{"_path":858,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":859,"content":865,"config":871,"_id":873,"_type":14,"title":874,"_source":16,"_file":875,"_stem":876,"_extension":19},"/en-us/blog/eks-fargate-runner",{"title":860,"description":861,"ogTitle":860,"ogDescription":861,"noIndex":6,"ogImage":862,"ogUrl":863,"ogSiteName":686,"ogType":687,"canonicalUrls":863,"schema":864},"Setting up GitLab EKS Fargate Runners in just one hour","This detailed tutorial answers the question of how to leverage Amazon's AWS Fargate container technology for GitLab Runners.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663373/Blog/Hero%20Images/jeremy-lapak-CVvFVQ_-oUg-700unsplash.jpg","https://about.gitlab.com/blog/eks-fargate-runner","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get started with GitLab EKS Fargate Runners in 1 hour and zero code, Iteration 1\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2023-05-24\",\n      }",{"title":866,"description":861,"authors":867,"heroImage":862,"date":868,"body":869,"category":721,"tags":870},"Get started with GitLab EKS Fargate Runners in 1 hour and zero code, Iteration 1",[718],"2023-05-24","Leveraging Amazon's AWS Fargate container technology for [GitLab\nRunners](https://docs.gitlab.com/runner/) has been a longstanding 
ask from\nour customers. This tutorial gets you up and running with the GitLab EKS\nFargate Runner combo in less than an hour.\n\n\nGitLab has a pattern for this task for\n[Fargate](https://docs.aws.amazon.com/AmazonECS/latest/userguide/what-is-fargate.html)\nrunners under AWS Elastic Container Service (ECS). The primary challenge\nwith this solution is that AWS ECS itself does not allow for the overriding\nof what image is used when calling an ECS task. Therefore, each GitLab\nRunner manager ignores the gitlab-ci.yml `image:` tag and runs on the image\npreconfigured in the task during deployment of the runner manager. As a\nresult, you'll end up creating runner container images that contain every\ndependency for all the software built by the runner, or you'll create a lot\nof runner managers per image — or both.\n\n\nI have long wondered if Fargate-backed Elastic Kubernetes Service (EKS)\ncould get around this limitation since, by nature, Kubernetes must be able\nto run any image given to it.\n\n\n## The approach\n\n\nNothing takes the joy out of learning faster than a lot of complex setup\nbefore being able to get to the point of the exercise. To address this, this\ntutorial uses four things to dramatically reduce the time and steps required\nto get from zero to hero.\n\n\n1. AWS CloudShell to minimize the EKS Admin Tooling setup. This also leaves\nyour local machine environment untouched so that other tooling\nconfigurations don't get modified.\n\n2. A project called **AWS CloudShell ”Run From Web” Configuration Scripts**\nto rapidly add additional tooling to CloudShell. This includes some hacks to\nget large Terraform templates to work on AWS CloudShell.\n\n3. EKS Blueprints — specifically, a Terraform example that implements both\nthe [Karpenter\nautoscaler](https://aws.amazon.com/blogs/aws/introducing-karpenter-an-open-source-high-performance-kubernetes-cluster-autoscaler/)\nand Fargate, including for the kube-system namespace.\n\n4. 
A simple Helm install for GitLab Runner.\n\n\nAlthough you will be running CLI commands and editing config files, no\ncoding is required in the sense that you won't have to build something\ncomplex from scratch and then maintain it yourself.\n\n\n## The results\n\n\nIt works! It can run 2 x 200 (max allowed per job) parallel “Hello, World”\njobs on AWS Fargate-backed EKS in about 4 minutes, which demonstrates the\nunlimited scalability. It can also run a simple Auto DevOps pipeline, which\nproves out the ability to run a bunch of different containers.\n\n\nThe fact that the entire cluster - including kube-system - is Fargate backed\nreduces the Kubernetes specific long term SRE work to a much lower value\napproaching that of ECS Fargate clusters. Later on we discuss that this\ntrade-off has a cost and how it can be reconfigured.\n\n\n## What makes it possible: Product-managed IaC that is an extensible\nframework\n\n\nToolkitting made up of Infrastructure as Code (IaC) is frequently referred\nto as “templates,” and these templates have a reputation of not aging well\nbecause there is no active stewardship of the codebase — they are thought of\nas a one-and-done effort. However, this term does not reflect reality well\nwhen the underlying IaC code is actually being product-managed. You can tell\nif something is being product-managed by using these markers:\n\n\n- It has a scope-bounded vision of what it wants to do for the community\nbeing served (customer).\n\n- It has active stewardship that keeps the codebase moving along, even if it\nis open source.\n\n- It seeks to incorporate strategic enhancements, a.k.a. 
new features.\n\n- Things that are broken are considered bugs and are actively eliminated.\n\n- There is a cadence of taking underlying version updates and for supporting\nnew versions of the primary things they deploy.\n\n\nAs an extensible framework, EKS Blueprints:\n\n\n- Are purposefully architected to be extended by anyone.\n\n- Already have many extensions built.\n\n\nWhen implementing using EKS Blueprints and you come upon a new need, it is\nimportant to check if EKS Blueprints already handles that consideration -\nsimilarly to how you would look for Ruby Gems, NPM Modules or Python PyPI\npackages before building functionality from scratch.\n\n\nAll of the above are aspects of how the AWS EKS team is product-managing EKS\nBlueprints. They deserve a big round of applause because product-managing\nanything to prevent it from becoming yet another community-maintained\nshelfware project is a strong commitment that requires tenacity!\n\n\n## Reproducing the experiment\n\n\n### 1. Set up AWS CloudShell\n\n\n> **Note:** If you already have a fully persistent environment setup (like\nyour laptop) with: AWS CLI, kubectl, Terraform, then you can avoid\nenvironment rebuilds when AWS CloudShell times out by using that instead.\n\n\nAWS CloudShell comes with kubectl, Git, and AWS CLI, which are all needed.\nHowever, we also need a few other scripts. More information about these\nscripts can be read in [my blog post on AWS CloudShell “Run For Web”\nConfiguration\nScripts](https://missionimpossiblecode.io/aws-cloudshell-run-from-web-configuration-scripts).\n\n\n> **Note:** The steps in this section up through the `git clone` from GitLab\nstep (second clone operation) in the next section can be accomplished by\nrunning this: `s=prep-eksblueprint-karpenter.sh ; curl -sSL\nhttps://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/${s}\n-o /tmp/${s}; chmod +x /tmp/${s}; bash /tmp/${s}*` .\n\n\n1. 
Use the web console to login to an AWS account where you have admin\npermissions.\n\n2. Switch to the region of your choosing.\n\n3. In the bottom left of the console click the “CloudShell” icon.\n\n4. Copy and paste the following one-liner into the console to install Helm,\nTerraform, and the Nano text editor:\n   `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/add-all.sh -o $HOME/add-all.sh; chmod +x $HOME/add-all.sh; bash $HOME/add-all.sh`\n5. Since our Terraform template will grow larger than the 1GB limit of space\nin the $HOME directory, we need a workaround to use the template in one\ndirectory, but store the Terraform state in $HOME where it will be kept as\nlong as 120 days. The following one-liner triggers a script that performs\nthat setup for us, after which we can use the /terraform directory for our\ntemplate:\n   `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/prep-for-terraform.sh -o $HOME/prep-for-terraform.sh; chmod +x $HOME/prep-for-terraform.sh; bash $HOME/prep-for-terraform.sh`\n\n### 2. Run Terraform EKS Blueprint\n\n\n> **Note:** If at any time you leave your AWS CloudShell long enough for\nyour session to end, the /terraform directory will be tossed. Simply run the\nlast script above and the first four steps below to make it operable again.\nThis will most likely be necessary when it comes time to teardown the\nTerraform created AWS resources.\n\n>\n\n> Sometimes your AWS CloudShell credentials may expire with a message like:\n`Error: Kubernetes cluster unreachable: Get \">CLUSTER URL>\": getting\ncredentials: exec: executable aws failed with exit code 255`. Simply refresh\nthe entire browser tab where AWS CloudShell is running and you’ll generally\nhave new credentials.\n\n\n#### Version safety\n\n\nThis tutorial uses a specific release of the EKS Blueprint project so that\nyou have the known state at the time of publishing. 
The project version also\ncascades into the versions of all the many dependent modules. While it may\nalso work with the latest version, the version at the time of writing was\nVersion 4.29.0.\n\n\nThis tutorial also uses Terraform binary Version 1.4.5.\n\n\n#### Procedures\n\n\nIf, while using AWS CloudShell, you experience this error: `Error:\nconfiguring Terraform AWS Provider: no valid credential sources for\nTerraform AWS Provider found`, you will need to refresh your browser to\nupdate the cached credentials in the terminal session.\n\n\nPerform the following commands on the AWS CloudShell session:\n\n\n1. `git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git\n--no-checkout /terraform/terraform-aws-eks-blueprints` \n\n2. `cd /terraform/terraform-aws-eks-blueprints/`\n\n3. `git reset --hard tags/v4.29.0` #Version pegging to the code that this\narticle was authored with.\n\n4. `git clone\nhttps://gitlab.com/guided-explorations/aws/eks-runner-configs/gitlab-runner-eks-fargate.git\n/terraform/terraform-aws-eks-blueprints/examples/glrunner`\n\n   **Note:** Like other EKS Blueprints examples, the GitLab EKS Fargate Runner example references EKS Blueprint modules with a relative directory reference. This is why we are cloning it into a subdirectory of the EKS Blueprints project.\n5. `cd /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n\n6. `terraform init`\n\n   **Important**: If you are using AWS CloudShell and your session times out, the /terraform folder and the installed utilities will be gone. You would have to reproduce the above steps to get the Terraform template in a usable state again. This is most likely to happen when you go to use Terraform to delete the stack after playing with it for some days.\n\n   The next few instructions are from: **https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/examples/karpenter/README.md#user-content-deploy**. Note the `-state` switch ensures our state is in persistent storage.\n7. 
`terraform apply -target module.vpc -state=$HOME/tfstate/runner.tfstate`\n\n8. `terraform apply -target module.eks -state=$HOME/tfstate/runner.tfstate`\n\n9. **Note:** If you receive “Error: The configmap ”aws-auth” does not\nexist”, re-run the same command - it will usually update successfully.\n\n10. `terraform apply -state=$HOME/tfstate/runner.tfstate`\n\n\nThe previous command will output a kubeconfig command that needs to be run\nto ensure subsequent kubectl commands work. Run that command. If you are in\nAWS CloudShell and did not copy the command, this command should work and\nmap to the correct region:\n    `aws eks update-kubeconfig --region $AWS_DEFAULT_REGION --name \"glrunner\"`\n\nIf everything was done correctly, you will have an EKS cluster named\n`karpenter` in the CloudShell region web console like this:\n\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/eksclusterinconsole.png)  \n\n\nAnd the output of this console command `kubectl get pods -A` will look like\nthis:\n\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/cliplaincluster.png)\n\n\nThe output of this console command `kubectl get nodes -A` will show the\nFargate prefix:\n\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/clinodesarefargate.png)\n\n\n> **Note:** Notice that all the EKS extras (coredns, ebs-cni, and karpenter\nitself) are also running on Fargate. If you are willing to tolerate some\nregular Kubernetes nodes, you may be able to save cost by running always-on\npods on regular Kubernetes hosts. Since this cluster runs Karpenter, you\nwill not need to manually scale those hosts and EKS makes control plane and\nnode updates easier.\n\n\n### 3. Install GitLab Runner\n\n\nThese and other commands are available in the GitLab documentation for\n[GitLab Runner Helm\nChart](https://docs.gitlab.com/runner/install/kubernetes.html#additional-configuration).\n\n\n1. 
Create an empty GitLab project.\n\n2. Retrieve a GitLab Runner Token from the project. Keep in mind that using\na project token is the easiest way to ensure your experiment runs only on\nthe EKS Fargate Runner. Using a group token may cause your job to run on\nother runners already setup at your company. You can follow [“Obtain a\ntoken”](https://docs.gitlab.com/runner/register/#requirements) from the\ndocumentation if you need to.\n\n3. Perform the following commands back in the AWS CloudShell session.\n\n4. `nano runnerregistration.yaml`\n\n5. Paste the following:\n\n   ```yaml\n   gitlabUrl: https://_YOUR_GITLAB_URL_HERE_.com\n   runnerRegistrationToken: _YOUR_GITLAB_RUNNER_TOKEN_HERE_\n   concurrent: 200\n   rbac:\n     create: true\n   runners:\n     tags: eks-fargate\n     runUntagged: true\n     imagePullPolicy: if-not-present\n   envVars:\n     - name: KUBERNETES_POLL_TIMEOUT\n       value: 90  \n   ```\n\n   **Note:** Many more settings are discussed in the documentation for the [Kubernetes Executor](https://docs.gitlab.com/runner/executors/kubernetes.html). \n\n**Hard Lesson:** Using a setting for `concurrent` that is lower than our\n`parallel` setting in the GitLab job below results in all kinds of failures\ndue to some job pods having to wait for an execution slot. Since it’s\nFargate, there is no savings to keeping it lower and no negative impact to\nmaking it the complete parallel amount.\n\n\n6. Replace \\_YOUR_GITLAB_URL_HERE_ with your actual GitLab URL.\n\n7. Replace \\_YOUR_GITLAB_RUNNER_TOKEN_HERE_ with your actual runner token.\n\n8. Press CTRL-X to exit and press Y to the save prompt.\n\n9. `helm repo add gitlab https://charts.gitlab.io`\n\n10. `helm repo update gitlab`\n\n11. `helm install --namespace gitlab-runner --create-namespace runner1 -f\nrunnerregistration.yaml gitlab/gitlab-runner`\n\n12. 
Wait for a few minutes and check the project’s list of runners for a new\none with the tag `eks-fargate`\n\n\nIn AWS CloudShell the command `kubectl get pods -n gitlab-runner` should\nproduce output similar to this:\n\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/runnerlist.png)\n\n\nAnd in the GitLab Runner list, it will look similar to this:\n\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/glrunnerlist.png)\n\n\n### 4. Run a test job\n\n\nThe simplest way to test GitLab Runner scaling is using the `parallel:`\nkeyword to schedule multiple copies of a job. It can also be used to create\na job matrix where not all jobs do the same thing.\n\n\nOne or more GitLab Runner Helm deployments can live in any namespace, so you\nhave many to many mapping flexibility for how you think of runners and their\nKubernetes context.\n\n\nIn the GitLab project where you created the runner, use the web IDE to\ncreate .gitlab-ci.yml and populate it with the following content:\n   ```yaml\n   parallel-fargate-hello-world:\n     image: public.ecr.aws/docker/library/bash\n     stage: build\n     parallel: 200\n     script:\n       - echo \"Hello Fargate World\"\n   ```\n\n**Hard Lesson:** After hitting the Docker hub image pull rate limit, I\nshifted to the same container in the AWS Public Elastic Container Registry\n(ECR), which has an [image pull rate\nlimit](https://docs.aws.amazon.com/AmazonECR/latest/public/public-service-quotas.html)\nof 10 per second for this scenario.\n\n\nIf the job does not automatically start, use the pipeline page to force it\nto run.\n\n\nIf everything is configured correctly, your final pipeline status panel\nshould look something like this:\n\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/completedjobs.png)\n\n\n### 5. 
Runner scaling experimentation\n\n\nThese and other commands are available in the GitLab documentation for\n[GitLab Runner Helm\nChart](https://docs.gitlab.com/runner/install/kubernetes.html#additional-configuration).\n\n\nAdditional runners can be added by re-running the install command with a\ndifferent name for the runner (if using the same token you’ll have two\nrunners in the same group or project):\n\n\n`helm install --namespace gitlab-runner runner2 -f runnerregistration.yaml\ngitlab/gitlab-runner`\n\n\n200 jobs takes just under 2 minutes.\n\n\n#### 400 parallel jobs\n\n\nBy setting up a second identical job (with a unique job name), I was able to\nprocess 400 total jobs.\n\n\n**Hard Lesson:** The runner likes to schedule all jobs in a parallel job on\nthe same runner instance. It does not seem to want to split a large job\nacross multiple runners registered in the same project. So in order to get\nmore than 200 jobs to process, I had to have two registered runners set to\n`concurrent:200` and two seperate jobs set to `parallel: 200`\n\n\n400 jobs takes just over 3 minutes.\n\n\n#### More than 400 parallel jobs\n\n\nAs I tried to scale higher, jobs started to hang. I tried specifically\nrouting jobs to five runners each capable of 300 parallel jobs. I also tried\nmultiple stages and used a hack of `needs []` to get simultaneous execution\nof jobs in multiple stages.\n\n\nI was not successful and there could be a wide variety of reasons why — a\nriddle for a future iteration.\n\n\nThis command can be used to update a runner's settings after editing the\nHelm values file (including the token to move the runner to another\ncontext): \n\n\n`helm upgrade --namespace gitlab-runner -f runnerregistration.yaml runner2\ngitlab/gitlab-runner`\n\n\nI found that when I pushed the limits, I would sometimes end up with hung\npods until I understood what needed adjusting. 
Leaving hung Fargate pods\nwill add up to a lot of cash because the pricing assumes very short\nexecution times. This command helps you terminate job pods without\naccidentally terminating the runner manager pods:\n\n\n`kubectl get pods --all-namespaces --no-headers |  awk '{if ($2 ~\n\"_YOUR_JOB_POD_PREFACE_*\") print $2}' | xargs kubectl -n\n_YOUR_RUNNER_NAMESPACE_ delete pod`\n\n\nDon't forget to replace \\_YOUR_RUNNER_NAMESPACE_ and \\_YOUR_JOB_POD_PREFACE_\n“_YOUR_JOB_POD_PREFACE\\_” is the unique preface of ONLY the jobs from a\ngiven runner followed by the wildcard star character => \\*\n\n\nTo uninstall a runner, use:\n\n\n`helm delete --namespace gitlab-runner runner1`\n\n\n#### Testing Auto DevOps to prove `image:` tag is honored\n\n\nTechnically testing Auto DevOps to prove the `image:` tag is honored this\nisn’t entirely necessary since the above job loads the bash container\nwithout the container being specified in any of the runner or infrastructure\nsetup. However, I performed this as a litmus test anyway.\n\n\nFollow these steps:\n\n\n1. Create a new project by clicking the “+” sign in the top bar of GitLab.\n\n2. On the next page, select “New Project/Repository”.\n\n3. Then “Create from template”.\n\n4. Select “Ruby on Rails” (first choice).\n\n5. Once the project creation is complete, register an EKS runner to it (or\nre-register the existing runner to the new project).\n\n6. In the project, select “Settings (Gear Icon)” => “CI/CD” => Auto DevOps\n=> Default to Auto DevOps pipeline.\n\n7. Click “Save changes”.\n\n\nThe Auto DevOps pipeline should run. If you don’t have a cluster wired up,\nit will mainly do security scanning, which is sufficient to prove that\narbitrary containers can be used by the Fargate-backed GitLab Runner.\n\n\n### 6. Solution tuning via extensible platform\n\n\nEKS Blueprints is not only product-managed, it is also an extensible\nplatform or framework. 
In the spirit of fully leveraging the extensible\nproduct managed EKS Blueprints project, you will always want to check if\nBlueprints is already instrumented for your scenario before writing code.\nAdditionally, if you must write code, you can consider contributing it as an\nEKS Blueprint extension so the community can take on some responsibility for\nmaintaining it.\n\n\n1. The EKS Blueprints Managed IaC has a dizzing number of tuning parameters\nand optional extensions. For instance, if you want the full GitLab Runner\nlogs collected to AWS CloudWatch, it is a simple configuration to add\nfluentd log agent to push custom logs to CloudWatch.\n\n2. Using Fargate for always-on containers is a trade-off of compute costs to\nget rid of Kubernetes node management overhead. This trade-off can be easily\nreversed in this example by removing the \"kube-system\" from\n\"fargate_profiles\" - since Karpenter is also installed and configured, the\nhosts will autoscale for load.\n\n\n### 7. Teardown\n\n\nThe next few instructions are from\nhttps://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/examples/karpenter/README.md#user-content-destroy.\n\n\nIf you are using AWS CloudShell and the /terraform directory no longer\nexists, perform these steps to re-prepare AWS CloudShell to perform\nteardown.\n\n\nIf you are not using AWS CloudShell, skip forward to “Teardown steps”.\n\n\n1. `curl -sSL\nhttps://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/add-all.sh\n-o $HOME/add-all.sh; chmod +x $HOME/add-all.sh; bash $HOME/add-all.sh`\n\n2. `curl -sSL\nhttps://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/prep-for-terraform.sh\n-o $HOME/prep-for-terraform.sh; chmod +x $HOME/prep-for-terraform.sh; bash\n$HOME/prep-for-terraform.sh`\n\n3. `git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git\n--no-checkout /terraform/terraform-aws-eks-blueprints` \n\n4. `cd /terraform/terraform-aws-eks-blueprints/`\n\n5. 
`git reset --hard tags/v4.29.0`\n\n6. `git clone\nhttps://gitlab.com/guided-explorations/aws/eks-runner-configs/gitlab-runner-eks-fargate.git\n/terraform/terraform-aws-eks-blueprints/examples/glrunner`\n\n   > **Note:** The above steps can be accomplished by running this: `s=prep-eksblueprint-karpenter.sh ; curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/${s} -o /tmp/${s}; chmod +x /tmp/${s}; bash /tmp/${s}` .\n\n7. `cd /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n\n8. `terraform init`\n\n\nFollow these teardown steps:\n\n\n1. `helm delete --namespace gitlab-runner runner1`\n\n2. `helm delete --namespace gitlab-runner runner2`\n\n3. `terraform destroy -target=\"module.eks_blueprints_kubernetes_addons\"\n-auto-approve -state=$HOME/tfstate/runner.tfstate`\n\n4. `terraform destroy -target=\"module.eks\" -auto-approve\n-state=$HOME/tfstate/runner.tfstate`\n\n5. **Note:** If you receive an error about refreshing cached credentials,\nsimply re-run the command again and it will usually update successfully.\n\n6. `terraform destroy -auto-approve -state=$HOME/tfstate/runner.tfstate`\n\n\n### Iteration _n_ : We would love your input\n\n\nThis blog is \"Iteration 1\" precisely because it has not been production\nload-tested nor specifically cost-engineered. And obviously a “Hello, World”\nscript is not testing much in the way of real work. I really set out to\nunderstand if we could run arbitrary containers in a GitLab Fargate setup\n(and we can) and then got curious about what parallel job scaling might look\nlike with Fargate (and it looks good). The Kubernetes Runner executor has\nmany, many available customizations and it is likely that scaling a\nproduction loaded implementation on EKS will reveal the need to tune more of\nthese parameters. 
\n\n\n#### **Collaborative contribution challenges**\n\n\nHere are some ideas for further collaborative work on this project:\n\n\n- To push the limits, create a configuration that can scale to 1000\nsimultaneous jobs.\n\n- An aws-logging config map that uploads runner pod logs to AWS CloudWatch.\n\n- A cluster configuration where runner managers and everything that is not a\nrunner job run on non-Fargate nodes – if and only if it will be cheaper than\nFargate running 24 x 7.\n\n- A Fargate Spot configuration. It’s important that compute type be noted as\na runner tag and it’s important that the same cluster has non-spot instances\nbecause some jobs should not run on spot compute and the decision whether to\ndo so should be available to the GitLab CI Developer who is creating an\npipeline.\n\n\n#### Other runner scaling initiatives\n\n\nWhile GitLab is building the Next Runner Auto-scaling Architecture,\n[Kubernetes refinements are not a part of this architectural\ninitiative](https://docs.gitlab.com/ee/architecture/blueprints/runner_scaling/#proposal).\n\n\n#### Everyone can contribute\n\n\nThis tutorial, as well as code for additional examples, will be maintained\nas open source as a GitLab Alliances Solution and we’d love to have your\ncontributions as you iterate and discover the configurations necessary for\nyour real-world scenarios. This tutorial is in a group wiki and the code\nwill be in the projects under that group here: [AWS Guided Explorations for\nEKS Runner\nConfigurations](https://gitlab.com/guided-explorations/aws/eks-runner-configs/gitlab-runner-eks-fargate/-/blob/main/README.md). 
\n\n\nPhoto by [Jeremy\nLapak](https://unsplash.com/@jeremy_justin?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/s/photos/runner?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n\n{: .note}\n",[723,767,9],{"slug":872,"featured":6,"template":703},"eks-fargate-runner","content:en-us:blog:eks-fargate-runner.yml","Eks Fargate Runner","en-us/blog/eks-fargate-runner.yml","en-us/blog/eks-fargate-runner",{"_path":878,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":879,"content":882,"config":888,"_id":890,"_type":14,"title":891,"_source":16,"_file":892,"_stem":893,"_extension":19},"/en-us/blog/enhance-application-quality-with-ai-powered-test-generation",{"noIndex":6,"title":880,"description":881},"Enhance application quality with AI-powered test generation","Learn how GitLab Duo with Amazon Q improves the QA process by automatically generating comprehensive unit tests.",{"title":880,"description":881,"authors":883,"heroImage":842,"date":884,"body":885,"category":694,"tags":886},[691],"2025-07-03","You know how critical application quality is to your customers and reputation. However, ensuring that quality through comprehensive testing can feel like an uphill battle. You're dealing with time-consuming manual processes, inconsistent test coverage across your team, and those pesky issues that somehow slip through the cracks. It's frustrating when your rating drops because quality assurance becomes a bottleneck rather than a safeguard.\n\nHere's where [GitLab Duo with Amazon Q ](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), which delivers agentic AI throughout the software development lifecycle for AWS customers, can help transform your QA process. This AI-powered capability can automatically generate comprehensive unit tests for your code, dramatically accelerating your quality assurance workflow. 
Instead of spending hours writing tests manually, you can let AI analyze your code and create tests that ensure optimal coverage and consistent quality across your entire application.\n\n## How GitLab Duo with Amazon Q works\n\nSo how does this work? Let's walk through the process together.\nWhen you're working on a new feature, you start by selecting the Java class you've added to your project through a merge request. You simply navigate to your merge request and click on the \"Changes\" tab to see the new code you've added.\n\nNext, you invoke Amazon Q by entering a quick action command. All you need to do is type `/q test` in the issue comment box. It's that simple – just a forward slash, the letter \"q\", and the word \"test\".\n\nOnce you hit enter, Amazon Q springs into action. It analyzes your selected code, understanding its structure, logic, and purpose. The AI examines your class methods, dependencies, and potential edge cases to determine what tests are needed.\n\nWithin moments, Amazon Q generates comprehensive unit test coverage for your new class. It creates tests that cover not just the happy path, but also edge cases and error conditions you might have overlooked. The generated tests follow your project's existing patterns and conventions, ensuring they integrate seamlessly with your codebase.\n\n## Why use GitLab Duo with Amazon Q?\n\nHere's the bottom line: You started with a critical challenge – maintaining high-quality applications while dealing with time constraints and inconsistent testing practices. GitLab Duo with Amazon Q addresses this by automating the test generation process, ensuring optimal code coverage and consistent testing standards. The result? 
Issues are detected before deployment, your applications maintain their quality, and you can develop software faster without sacrificing reliability.\n\nKey benefits of this feature:\n\n* Significantly reduces time spent writing unit tests\n* Ensures comprehensive test coverage across your codebase\n* Maintains consistent testing quality across all team members\n* Catches issues before they reach production\n* Accelerates your overall development velocity\n\nReady to see this game-changing feature in action? Watch how GitLab Duo with Amazon Q can transform your quality assurance process:\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/pxlYJVcHY28?si=MhIz6lnHxc6kFhlL\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Get started with GitLab Duo with Amazon Q today\n\nWant to learn more about GitLab Duo with Amazon Q? Visit the [GitLab and AWS partner page](https://about.gitlab.com/partners/technology-partners/aws/) for detailed information.\n\n## Agentic AI resources\n- [Agentic AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/)\n- [GitLab Duo with Amazon Q documentation](https://docs.gitlab.com/user/duo_amazon_q/)",[696,698,887,810,700,9],"testing",{"featured":91,"template":703,"slug":889},"enhance-application-quality-with-ai-powered-test-generation","content:en-us:blog:enhance-application-quality-with-ai-powered-test-generation.yml","Enhance Application Quality With Ai Powered Test 
Generation","en-us/blog/enhance-application-quality-with-ai-powered-test-generation.yml","en-us/blog/enhance-application-quality-with-ai-powered-test-generation",{"_path":895,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":896,"content":902,"config":908,"_id":910,"_type":14,"title":911,"_source":16,"_file":912,"_stem":913,"_extension":19},"/en-us/blog/environment-friction-cycle",{"title":897,"description":898,"ogTitle":897,"ogDescription":898,"noIndex":6,"ogImage":899,"ogUrl":900,"ogSiteName":686,"ogType":687,"canonicalUrls":900,"schema":901},"How GitLab eliminates value stream friction in dev environments","It is important to have the complete picture of scaled effects in view when designing automation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682507/Blog/Hero%20Images/sandeep-singh-3KbACriapqQ-unsplash.jpg","https://about.gitlab.com/blog/environment-friction-cycle","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab can eliminate the massive value stream friction of developer environment provisioning and cleanup\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-11-17\",\n      }",{"title":903,"description":898,"authors":904,"heroImage":899,"date":905,"body":906,"category":721,"tags":907},"How GitLab can eliminate the massive value stream friction of developer environment provisioning and cleanup",[718],"2022-11-17","A strong DevOps value stream drives developer empowerment as far left as\npossible. In GitLab, this is embodied in per-feature branch merge requests\nthat are rich with automated code quality and defect information - including\nnot only findings - but automated remediation capabilities and\ncollaboration. Some defects and code quality issues can only be found by\nanalyzing a running copy of the application, including DAST, IAST, fuzzing\nand many others. 
GitLab has built a fully automated, seamless developer\nenvironment lifecycle management approach right into the developer\nexperience. In fact, it’s so seamlessly built-in, it can be easy to overlook\nhow critical developer environment lifecycle management is. This article\nwill highlight why and how GitLab adds value using developer environment\nautomation. In addition, while GitLab provides out of the box developer\nenvironment lifecycle management for Kubernetes, this article demonstrates\nan approach and a working example of how to extend that capability to other\ncommon cloud-based application framework PaaS offerings.\n\n\n## Provisioning of development environments is generally a negative feedback\nloop\n\n\nIn a prior job, I worked on a DevOps transformation team that supported\nmultiple massive shared development environments in AWS. They were\naccessible to more than 4,000 developers working to build more than 100 SaaS\napplications and utility stacks. In the journey to the AWS Cloud, each\ndevelopment team took ownership of the automation required to deploy their\napplications. Since developers were able to self-service, over time this\nsolved the problem of development friction generated by waiting for\nenvironments to be provisioned for testing, feature experiments, integration\nexperiments, etc. \n\n\nHowever, the other half of the problem then ballooned - environment sprawl -\nwith an untold number of environments idling without management and without\nknowledge of when they could be torn down. Over time the development\nenvironment cost became a significant multiple of production costs. The\ncloud has solved problems with environment provisioning bottlenecks due to\nhardware acquisition and provisioning, but this can also inadvertently fuel\nthe high costs of unmanaged sprawl. 
This problem understandably causes\norganizations to raise administrative barriers to new development\nenvironments.\n\n\nIn many organizations this becomes a vicious cycle - most especially if\ndeveloper environments are operated by a different team, or worse, on an\nindependent budget. Environment justification friction usually comes quickly\nafter discovering the true cost of the current running environments.\nDevelopers then have to justify the need for new environment requests and\nthey have to make the gravest of promises to disband the environment as soon\nas they are done. Another friction arises when a separate group is tasked\nwith cost controls and environment provisioning and cleanup. This introduces\nfriction in the form of administrative and work queueing delays.\nCoordination friction also crops up because an accurate understanding of\nexactly what is needed for an environment can be challenging to convey. When\nmistakes are made or key information is missing, developers must go back and\nforth on support requests to get the configuration completely correct.\n\n\n## Partial automation can worsen the problem\n\n\nThat’s the first half of the environment lifecycle, but as I mentioned, even\nif that is fully automated and under the control of developers, the other\nhalf of the feedback loop comes into play. When a given development\nenvironment has fulfilled its initial justification reason, the team does\nnot want to destroy it because environments are so hard to justify and\ncreate. Then the sprawl starts and, of course, the barriers to new\nenvironments are raised even higher. This is a classic negative feedback\nloop.\n\n\nSystems theory shows us that sometimes there are just a few key factors in\nstopping or even reversing a negative feedback loop. 
Let’s take
Long-lived environments\nthat are updated multiple times for various independent projects start to\naccumulate configuration rot; they become snowflakes with small changes that\nare left over from non-implemented experiments, software or configuration\nremovals, and other irrelevant bits and pieces. Immutability is the practice\nof not doing “in place” updates to a computing element, but rather\ndestroying it and replacing it with a fresh, built-from-scratch, element.\nDocker has made this concept very accepted and effective in production\nworkloads, but development environments frequently do not have this\nattribute due to automating without the design constraint of immutability,\nso they are updated in-place for reuse by various initiatives. If the\nenvironment lifecycle is not fully automated, it impossible to make them\nworkable on a per-feature branch basis.\n\n\n## The problem of non-isolated development environments \n\n\nWhen environments are manually provisioned or when there is a lot of cost or\nadministrative friction to setting them up, environment sharing becomes more\ncommon place. This creates sharing contention at many levels. Waiting to\nschedule into use an environment, pressure to complete work quickly so\nothers can use the environment, and restrictions on the types of changes\nthat can be made to shared environments are just some of the common sharing\ncontention elements that arise. If environments can be isolated, then\nsharing contention friction evaporates. Pushing this to the extreme of a\nper-feature branch granularity brings many benefits, but is also difficult.\n\n\n## Effect on the development value stream\n\n\nThe effect that a friction-filled environment lifecycle has on the value\nstream can be immense - how many stories have you heard of projects waylaid\nfor weeks or months while waiting on environment provisioning? What about\ndefects shipped to production because a shared environment had left over\nconfiguration during testing? 
Frequently this friction is tolerated in the\nvalue stream because no one will argue that unlimited environment sprawl is\nan unwise use of company resources. We all turn off the lights in our home\nwhen we are no longer using a room and it is good business sense and good\nstewardship not to leave idle resources running at work.\n\n\nThe concept of good stewardship of planetary resources is actually becoming\nan architectural level priority in the technology sector. This is in\nevidenced in AWS’ [introduction of the “Sustainability” pillar to the AWS\nWell Architected principals in\n2021](https://aws.amazon.com/blogs/aws/sustainability-pillar-well-architected-framework/)\nand many other green initiatives in the technology sector.\n\n\nIt’s imperative that efforts to improve the development value stream\nconsider whether developer environment management friction is hampering the\nbreadth, depth and velocity of product management and software development.\n\n\n## Seamless and fully automated review environment lifecycle management\n\n\nWhat if this negative feedback loop could be stopped? What if new\nenvironments were seamless and automatically created right at the moment\nthey were needed? What if developers were completely happy to immediately\ntear down an environment when they were done because it takes no\njustification nor effort on their part to create new one at will?\n\n\nEnter GitLab Review Environments!\n\n\nGitLab review apps are created by the developer action of creating a new\nbranch. No humans are involved as the environment is deployed while the\ndeveloper is musing their first code changes on their branch.\n\n\nAs the developer pushes code updates the review apps are automatically\nupdated with the changes and all quality checks and security scanning are\nrun to ensure the developer understands that they introduced a vulnerability\nor quality defect. 
This is done within the shortest possible amount of time\nafter the defect was introduced.\n\n\nWhen the developer merges their code, the review app is automatically torn\ndown.\n\n\nThis seamless approach to developer environment provisioning and cleanup\naddresses enough of the critical factors in the negative feedback loop that\nit is effectively nullified.\n\n\nConsider:\n\n\n- Developer environment provisioning and cleanup are fully automated,\ntransparent, developer-initiated activities. They do not consume people nor\nhuman process resources, which are always legions slower and more expensive\nthan technology solutions.\n\n- Provisioning and cleanup timing are exactly synchronized with the\ndeveloper’s need, preventing inefficiencies in idle time before or after\nenvironment usage.\n\n- They are immutable on a new branch basis - a new branch always creates a\nnew environment from fresh copy of the latest code.\n\n- They are isolated - no sharing contention and no mixing of varying\nconfiguration.\n\n- They treat developer environments as a lifecycle.\n\n\nIt is so transparent that some developers may not even realize that their\nfeature branch has an isolated environment associated with it.\n\n\n## Hard dollar costs are important and opportunity costs are paramount\n\n\nGitLab environments positively contribute to the value stream in two\ncritical ways. First, the actual waste of idle machines is dramatically\nreduced. However, more importantly, all the human processes that end up\nbeing applied to managing that waste also disappear. Machines running in the\ncloud are only lost money. Inefficient use of people’s time carries a high\ndollar cost but it also carries a higher opportunity cost. 
There are so many\nvalue-generating activities people can do when their time is unencumbered by\ncost-control administration.\n\n\n## Multiplying the value stream contributions of developer review\nenvironments\n\n\nDeveloper environment friction is an industry-wide challenge and GitLab\nnearly eliminates the core problems of this feedback cycle. However, GitLab\nhas also gone way beyond simply addressing this problem by creating a lot of\nadditional value through seamless per-feature branch developer environments.\n\n\nHere is a visualization of where dynamic review environments plug into the\noverall GitLab developer workflow.\n\n\n![](https://about.gitlab.com/images/blogimages/environment-friction-lifecycle/gitlabenvironmentlifecycle.png)\n\n\n**Figure 1: Review environments with AWS Cloud Services**\n\n\nFigure 1 is showing GitLab’s full development cycle support with a little\nart of the possible thrown in around interfacing with AWS deployment\nservices. The green dashed arrow indicates that GitLab deploys a review\nenvironment when the branch is first created. Since the green arrow is part\nof the developer's iteration loop, the green arrow is also depicting that\nreview app updates are done on each code push. \n\n\nThe light purple box is showing that the iterative development and CI checks\nare all within the context of a merge request (MR), which provides a Single\nPane of Glass (SPOG) for all quality checks, vulnerabilities and\ncollaboration. Finally, when the merge is done, the review environment is\ncleaned up. The feature branch merge request is the furthest left that\nvisibility and remediation can be shifted. 
GitLab’s shifting of this into\nthe developer feature branch is what gives developers a semi-private\nopportunity to fix any quality or security findings with the specific code\nthey have added or updated.\n\n\nOne other thing to note here is that when GitLab CD code is engineered to\nhandle review environments, it is reused for all other preproduction and\nproduction environments. The set of AWS icons after the “Release” icon would\nbe using the same deployment code. However, if the GitLab CD code is\nengineered only around deploying to a set of static environments, it is not\nautomatically capable of review environments. Review environment support is\na superset of static environment support.\n\n\n## Review environments enable a profound shift left of visibility and\nremediation\n\n\nAt GitLab “shift left” is not just about “problem visibility” but also about\n“full developer enablement to resolve problems” while in-context. GitLab\nmerge requests provide critical elements that encourage developers to get\ninto a habit of defect remediation:\n\n\n- **Context** - Defect and vulnerability reporting is only for code the\ndeveloper changed in their branch and is tracked by the merge request (MR)\nfor that branch.\n\n- **Responsibility** - Since MRs and branches are associated to an\nindividual, it is evident to the developer (and the whole team) what defects\nwere introduced or discovered by which developers.\n\n- **Timing** - Developers become aware of defects nearly as soon as they are\nintroduced, not weeks or months after having integrated with other code. If\nthey were working on a physical product, we can envision that all the parts\nare still on the assembly bench.\n\n- **Visibility - Appropriately Local, Then Appropriately Global** -\nVisibility of defects is context specific. 
While a developer has an open MR\nthat is still a work in progress, they can be left alone to remedy\naccidentally-introduced defects with little concern from others because the\nvisibility is local to the MR. However, once they seek approvals to merge\ntheir code, then the approval process for the MR will cause the visibility\nof any unresolved defects and vulnerabilities to come to the attention of\neveryone involved in the approval process. This ensures that oversight\nhappens with just the right timing - not too early and not forgotten. This\nmakes a large-scale contribution to human efficiency in the development\nvalue stream.\n\n- **Advisement** - As much as possible GitLab integrates tools and advice\nright into the feature branch MR context where the defects are visible.\nDevelopers are given full vulnerability details and can take just-in-time\ntraining on specific vulnerabilities. \n\n- **Automated Remediation** - Developers can choose to apply\nauto-remediations when they are available.\n\n- **Collaboration** - They can use MR comments and new issues to collaborate\nwith team mates throughout the organization on resolving defects of all\ntypes.\n\n\nHaving seamless, effortless review environments at a per-feature branch\ngranularity is a critical ingredient in GitLab’s ability to maximize the\nshift left of the above developer capabilities. This is most critical in the\ndeveloper checks that require a running copy of application, which is\nprovided by the review environments. These checks include things such as\nDAST, IAST, API fuzzing and accessibility testing. 
The industry is also\ncontinuing to multiply the types of defect scanners that require an actively\nrunning copy of the application.\n\n\n## Extending GitLab review environments to other cloud application framework\nPaaS\n\n\nSo you may be thinking, “I love GitLab review environments, but not all of\nour applications are targeting Kubernetes.” It is true that the out-\nof-the-box showcasing of GitLab review environments depends on Kubernetes.\nOne of the key reasons for this is that Kubernetes provides an integrated\ndeclarative deployment capability known as deployment manifests. The\nenvironment isolation capability, known as namespaces, also provides a\ncritical capability. GitLab wires these Kubernetes capabilities up to a few\nkey pieces of GitLab CD to accomplish the magic of isolated, per-feature\nbranch review environments.\n\n\nAs far as I know there is no formal or defacto industry term for what I’ll\ncall “Cloud Application Framework PaaS.” Cloud-provided PaaS can be targeted\nat various “levels” of the problem of building applications. For instance,\nprimitive components such as AWS ELB address the problem of application load\nbalancing by providing a variety of virtual, cloud-scaling and secured\nappliances that you can use as a component of building an application.\nAnother example is [AWS Cognito](https://aws.amazon.com/cognito/) to help\nwith providing user login and profile services to an application build.\n\n\nHowever, there are also cloud PaaS offerings that seek to solve the entire\nproblem of rapid application building and maintenance. These are services\nlike AWS Amplify and AWS AppRunner. 
These services frequently knit together\nprimitive PaaS components (such as described above) into a composite that\nattempts to accelerate the entire process of building applications.\nFrequently these PaaS also include special CLIs or other developer tools\nthat attempt to abstract the creation, maintenance and deployment of an\nInfrastructure as Code layer. They also tend to be\n[GitOps](/topics/gitops/)-oriented by storing this IaC in the same\nrepository as the application code, which enables full control over\ndeployments via Git controls such as branches and merge requests.\n\n\nThis approach relieves developers of early stage applications from having to\nlearn IaC or hire IaC operations professionals too early. Basically it\nallows avoidance of overly early optimization of onboarding IaC skills. If\nthe application is indeed successful it is quite common to outgrow the\nintegrated IaC support provided by these specialized PaaS, however, the\nevolution is very natural because the managed IaC can simply start to be\ndeveloped by specialists.\n\n\nThe distinction of cloud application framework PaaS is important when\nunderstanding where GitLab can create compound value with Dynamic Review\nEnvironments. I will refer to this kind of PaaS as “Cloud Application\nInfrastructure PaaS” that tries to solve the entire “Building Applications\nProblem.”\n\n\nSo we have a bunch of GitLab interfaces and conventions for implementing\nseamless developer review environments and we have non-Kubernetes cloud\napplication infrastructures that provide declarative deployment interfaces\nand we can indeed make them work together! Interesting it is all done in\nGitLab CI YAML, which means that once you see the art of the possible, you\ncan start implementing dynamic review environment lifecycle management for\nmany custom environment types with the existing GitLab features. 
\n\n\n## A working, non-Kubernetes example of dynamic review environments in\naction\n\n\n![](https://about.gitlab.com/images/blogimages/environment-friction-lifecycle/CloudFormationDeployAnimatedGif.gif)\n\n\n**Figure 2: Working CD example of review environments for AWS\nCloudFormation**\n\n\nFigure 2 shows the details of an actual non-Kubernetes working example\ncalled CloudFormation AutoDeploy With Dynamic Review Environments. This\nproject enables any AWS CloudFormation template to be deployed. It\nspecifically supports an isolated stack deployment whenever a review branch\nis created and then also destroys that environment when the branch is\nmerged. \n\n\nHere are some of the key design constraints and best practices that allow it\nto support automated review environments.:\n\n\n- **The code is implemented as an include.** Notice that the main\n[.gitlab-ci.yml](https://gitlab.com/guided-explorations/aws/cloudformation-deploy/-/blob/main/.gitlab-ci.yml)\nfiles have only variables applicable to this project and then the inclusion\nof Deploy-AWSCloudFormation.gitlab-ci.yml. This allows you to treat the\nCloudFormation integration as a managed process, shared include to be\nimproved and updated. If the stress of backward compatibility of managing a\nshared dependency is too much, you can encourage developers to make a copy\nof this file to essentially version peg it with their project.\n\n\n- **Avoids Conflict with Auto DevOps CI Stage Names** - The [standard stages\nof Auto Devops are\nhere](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml#L70).\nThis constraint allows the auto deploy template to be leveraged. 
\n\n\n- **Creates and Sequences Custom Stages as Necessary** - For instance, you\ncan see we’ve added `create-changeset` stage and jobs.\n\n\n- The `deploy-review` job and it’s `environment:` section must have a very\nspecific construction, let’s look at the important details:\n\n  ```\n    rules:\n      - if: '$CI_COMMIT_BRANCH == \"main\"'\n        when: never\n      - if: '$REVIEW_DISABLED'\n        when: never\n      - if: '($CI_COMMIT_TAG || $CI_COMMIT_BRANCH) && $REQUIRE_CHANGESET_APPROVALS == \"true\"'\n        when: manual\n      - if: '($CI_COMMIT_TAG || $CI_COMMIT_BRANCH) && $REQUIRE_CHANGESET_APPROVALS != \"true\"'\n    artifacts:\n      reports:\n        dotenv: envurl.env\n    environment:\n      name: review/$CI_COMMIT_REF_SLUG\n      url: $DYNAMIC_ENVIRONMENT_URL\n      on_stop: stop_review\n  ```\n\n  \n\n  - `rules:` are used to ensure this job only runs when we are not on the main branch. The main branch implements long lived stage and prod environments.\n  - `artifacts:reports:dotenv` allows variables populated during a CI job to become pipeline level variables. The most critical role this does in this job is to allow the URL retrieved from CloudFormation Outputs to be populated into the variable DYNAMIC_ENVIRONMENT_URL. The file `enviurl.env` would have at least the line `DYNAMIC_ENVIRONMENT_URL={url-from-cloudformation}` in it. You can see this in the job code as `echo \"DYNAMIC_ENVIRONMENT_URL=${STACK_ENV_URL}\" >> envurl.env`\n  - `environment:name:` is using the Auto Deploy convention of placing review apps under the review environments top level called `review` The reference $CI_COMMIT_REF_SLUG ensures that the branch (or tag name) is used, but with all illegal characters removed. By your development convention, the Environment Name should become a part of the IaC constructs that ensure both uniqueness as well as identifiability by this pipeline. 
In GitLab's standard auto deploy for Kubernetes this is done by constructing a namespace that contains the name in this provided parameter. In CloudFormation we make it part of the Stack Name. The value here is exposed in the job as the variable ${ENVRONMENT}.\n  - `environment:url:` it is not self-evident here that the variable DYNAMIC_ENVIRONMENT_URL was populated by the deployment job and added to the file `enviro.env` so that it would contain the right value at this time. This causes the GitLab “Environment” page to have a clickable link to visit the environment. It also is used by DAST and other live application scan engines to find and scan the isolated environment.\n  - `environment:on_stop:` in the deploy-review job is what maps to the `stop_review` named job. This is the magic sauce behind automatic environment deletion when a feature branch is merged. `stop_review` must be written with the correct commands to accomplish the teardown.\n\n## A reusable engineering pattern\n\n\nThis CloudFormation pattern serves as a higher-level pattern of how GitLab\nreview environments can be adopted to any other cloud “Application Level\nPaaS.” This is a term I use to indicate a cloud PaaS that is abstracted\nhighly enough that developers think of it as “a place to deploy\napplications.” Perhaps a good way to contrast it with PaaS that does not\nclaim to serve as an entire application platform. Cloud-based load balancers\nare a good example of a PaaS that performs a utility function for\napplications but is not a place to build an entire cloud application. \n\n\n## Application PaaS for abstracting IaC concerns for developers\n\n\nGitLab auto deploy combines well with the cloud application framework PaaS\nthat has a disposition toward developer productivity by reducing or\neliminating IaC management required by developers. 
AWS Amplify has such\nproductivity support in the form of a developer specific CLI which allows\nimpacting to be authored and updated in the same Git repository where the\napplication code is stored. Adding an entire scaling database PaaS is as\nsimple as running a single CLI command.\n\n\nGenerally such Application PaaS not only generate and help maintain IaC\nthrough highly abstracted CLI or UI actions, they also contain a single\n`deploy` command which is easily combined with a GitLab Auto Deploy template\nfor working with that particular Application PaaS.\n\n\n## Wrap up\n\n\nHopefully this article has helped you understand that:\n\n\n- GitLab already contains a super valuable feature that automates developer\nenvironment lifecycle management.\n\n- It is critical in addressing a key friction in the DevOps value chain.\n\n- It can be extended beyond Kubernetes to other cloud application framework\nPaaS offerings.\n\n\n\nPhoto by [Sandeep\nSingh](https://unsplash.com/@funjabi?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/s/photos/friction?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[723,767,9],{"slug":909,"featured":6,"template":703},"environment-friction-cycle","content:en-us:blog:environment-friction-cycle.yml","Environment Friction Cycle","en-us/blog/environment-friction-cycle.yml","en-us/blog/environment-friction-cycle",{"_path":915,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":916,"content":922,"config":930,"_id":932,"_type":14,"title":933,"_source":16,"_file":934,"_stem":935,"_extension":19},"/en-us/blog/exporting-vulnerability-reports-to-html-pdf-jira",{"title":917,"description":918,"ogTitle":917,"ogDescription":918,"noIndex":6,"ogImage":919,"ogUrl":920,"ogSiteName":686,"ogType":687,"canonicalUrls":920,"schema":921},"How to export vulnerability reports to HTML/PDF and Jira","With GitLab's API, it's easy to query vulnerability info and send the report details 
elsewhere, such as a PDF file or a Jira project.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662877/Blog/Hero%20Images/security-cover-new.png","https://about.gitlab.com/blog/exporting-vulnerability-reports-to-html-pdf-jira","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to export vulnerability reports to HTML/PDF and Jira\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Siddharth Mathur\"}],\n        \"datePublished\": \"2023-09-14\",\n      }",{"title":917,"description":918,"authors":923,"heroImage":919,"date":925,"body":926,"category":721,"tags":927},[924],"Siddharth Mathur","2023-09-14","\nGitLab's [Vulnerability Report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/) makes it easy to triage security scan results without ever having to leave the platform. You can manage your code, run security scans against it, and fix vulnerabilities all in one place. That being said, some teams prefer to manage their vulnerabilities in a separate tool like Jira. They may also need to present the vulnerability report to leadership in a digestible format.\n\nOut of the box, GitLab's Vulnerability Report can be [exported to CSV](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/#export-vulnerability-details) with a single click, for easy analysis in other tools. In some cases though, a simple PDF of the report is all that's needed. \n\nWith [GitLab's API](https://docs.gitlab.com/ee/api/graphql/reference/index.html#queryvulnerabilities), it's easy to query vulnerability info and send the report details elsewhere, such as a PDF file or a Jira project. In this blog, we'll show you how to export to HTML/PDF and Jira. 
**Note that the scripts used in this tutorial are provided for educational purposes and they are not supported by GitLab.**\n\n## Exporting to HTML/PDF\nTo export your vulnerability reports to HTML or PDF, head to the [Custom Vulnerability Reporting](https://gitlab.com/jwagner-demo/vandelay-industries/engineering/custom-vulnerability-reporting) project. \n\n![Project overview](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/project_overview.png)\n\n\nThis project contains a script that queries a project's vulnerability report, and then generates an HTML file from that data. The pipeline configured in the project runs this script and converts the HTML file to PDF as well.\n\nTo use the exporter, first [fork the project](https://gitlab.com/jwagner-demo/vandelay-industries/engineering/custom-vulnerability-reporting/-/forks/new) or [import it into a new project](https://gitlab.com/projects/new#import_project) (select “Repository by URL” and paste the git URL of the original project).\n\n![Project import](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/project_import.png)\n\n\nSet the CI/CD variables as described in the readme. You'll need the following from GitLab:\n- GitLab project/personal access token with permissions to access vulnerability info (read_api scope)\n- GitLab GraphQL API URL (for SaaS this is https://gitlab.com/api/graphql)\n- GitLab project path (e.g. smathur/custom-vulnerability-reporting)\n\nAfter you've set the required CI/CD variables, manually run a pipeline from your project's Pipelines page. Once the pipeline is complete, you'll see your file export by going to the “build_report” (for HTML) or “pdf_conversion” job and selecting “Download” or “Browse” on the sidebar under \"Job artifacts.\" And there you have it! 
A shareable, easy-to-read export of your project's vulnerabilities.\n\n![PDF export](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/pdf_export.png)\n\n\n## Exporting vulnerability info to Jira\nGitLab lets you create Jira tickets from vulnerabilities through the UI using our [Jira integration](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#create-a-jira-issue-for-a-vulnerability). While you can do this individually for vulnerabilities that need actioning, sometimes teams need to bulk-create Jira tickets for all their vulnerabilities. We can leverage GitLab and Jira's APIs to achieve this.\n\nTo get started, head to the [External Vulnerability Tracking](https://gitlab.com/smathur/external-vulnerability-tracking) project. This script fetches vulnerabilities in the same way as the script above, but it uses the Jira API to create a ticket for each vulnerability. Each ticket's description is also populated with details from GitLab's vulnerability report.\n\nTo use the exporter, simply [fork the project](https://gitlab.com/smathur/external-vulnerability-tracking/-/forks/new) or [import it into a new project](https://gitlab.com/projects/new#import_project) (select “Repository by URL” and paste the git URL of the original project), and set the CI/CD variables as described in the readme. You'll need the following from GitLab:\n- GitLab project/personal access token with permissions to access vulnerability info (read_api scope)\n- GitLab GraphQL API URL (for SaaS this is https://gitlab.com/api/graphql)\n- GitLab project path (e.g. 
smathur/external-vulnerability-tracking)\n\nYou will also need the following from Jira:\n- Jira [personal access token](https://id.atlassian.com/manage-profile/security/api-tokens)\n- Jira API issue endpoint URL (for SaaS this is https://ORG_NAME.atlassian.net/rest/api/latest/issue/)\n- Jira user email ID\n- Jira project key where you want to create vulnerability tickets (e.g. ABC)\n\nOnce you have set your CI/CD variables as described in the project readme, simply run a pipeline from your project's Pipelines page, and watch as your tickets get created in Jira!\n\nIf you run the pipeline again in the future, the script will run a search query against your Jira project to prevent duplicate tickets from being created. It will create tickets for new vulnerabilities that aren't already in Jira.\n\n![Jira export](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/jira_export.png)\n\n\n## References\n- [GitLab Vulnerability API](https://docs.gitlab.com/ee/api/graphql/reference/index.html#queryvulnerabilities)\n- [Custom Vulnerability Reporting project](https://gitlab.com/jwagner-demo/vandelay-industries/engineering/custom-vulnerability-reporting)\n- [External Vulnerability Tracking project](https://gitlab.com/smathur/external-vulnerability-tracking)\n- [Jira REST API examples](https://developer.atlassian.com/server/jira/platform/jira-rest-api-examples/)\n\n",[700,928,929,9],"collaboration","security",{"slug":931,"featured":6,"template":703},"exporting-vulnerability-reports-to-html-pdf-jira","content:en-us:blog:exporting-vulnerability-reports-to-html-pdf-jira.yml","Exporting Vulnerability Reports To Html Pdf 
Jira","en-us/blog/exporting-vulnerability-reports-to-html-pdf-jira.yml","en-us/blog/exporting-vulnerability-reports-to-html-pdf-jira",{"_path":937,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":938,"content":944,"config":953,"_id":955,"_type":14,"title":956,"_source":16,"_file":957,"_stem":958,"_extension":19},"/en-us/blog/five-signs-you-should-think-bigger",{"title":939,"description":940,"ogTitle":939,"ogDescription":940,"noIndex":6,"ogImage":941,"ogUrl":942,"ogSiteName":686,"ogType":687,"canonicalUrls":942,"schema":943},"Five signs you should think BIGGER!","Are you a designer who is frustrated with only focusing on the next milestone? Do you feel like you have to answer too many questions in every Issue? Do you feel like your product is not making any progress? **Time to Think Bigger!**","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099620/Blog/Hero%20Images/Blog/Hero%20Images/insights_insights.png_1750099620265.png","https://about.gitlab.com/blog/five-signs-you-should-think-bigger","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Five signs you should think BIGGER!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Iain Camacho\"}],\n        \"datePublished\": \"2021-03-30\",\n      }",{"title":939,"description":940,"authors":945,"heroImage":941,"date":947,"body":948,"category":721,"tags":949},[946],"Iain Camacho","2021-03-30","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nAs a designer, it’s difficult to balance the scale of initiatives: Design too small, and nobody is excited or can understand the direction things are going. Start too big and everyone on the team may be too intimidated to start. 
ThinkBIG is a way of utilizing designers’ natural skillset to balance the iterative nature of engineering with the visionary nature of design.\n\nHere are 5 signals that you should switch up your style and Think Bigger:\n\n### 1) Every milestone is spent only prepping the next\n\n#### Signal\n\nWe’ve all been there. The next milestone planning issue is starting to get filled out and you, the designer, are realizing how many issues need design in order to be ready. As the priorities shift, you know the last two weeks of this milestone will be spent desperately trying to design mockups for engineers to start working on days later. I like to call this “Feeding the sharks”. It describes a certain level of panic some designers feel every milestone: If I don’t deliver enough, I might get chomped!\n\n#### Solution\n\nThinkBIG focuses on creating a larger-scale vision that can be iterated on as we go. This means that each design you put together leads to many independent issues engineers can work on. For a designer, this increases [results](https://handbook.gitlab.com/handbook/values/#results) by delivering one design worth many issues.\n\n### 2) Engineers are asking _a lot_ of questions\n\n#### Signal\n\nHave you ever started a new milestone and as engineers get started, they have a million questions detailing every possible state, permutation, and example that they should account for? This line of questioning means you, the designer, now need to make a myriad of new designs with only minute changes between them. This is not an [efficient](https://handbook.gitlab.com/handbook/values/#efficiency) use of the designer’s time.\n\n#### Solution\n\nFirst off, all these questions are valid and decisions that need to be made. By Thinking Bigger, engineers are better prepared to handle all the edge cases independently because they walk into their work with a fuller context of the impact on users.  
This enables empathy-driven engineering, allowing engineers to lead the conversation around edge-cases with solutions in mind, instead of needing it to be defined ahead of time. By pushing the edge cases further down the product development lifecycle, there is also a unique opportunity for product, design, and engineering to [collaborate](https://handbook.gitlab.com/handbook/values/#collaboration) on delivering value to customers while still working iteratively.\n\n### 3) Nobody agrees on what the “MVC” actually is\n\n#### Signal\n\nPicture it: You’ve worked hard for weeks refining and distilling a big feature ask into a nicely designed MVC. It’s small, delivers value, and is beautiful to boot! You’ve convinced your PM to prioritize this beautiful little gem and it’s going onto the planning board. Everything feels amazing until… devastation!\n\nAfter engineering looked at it, they came back and said it was too large and would need to be broken down further. Now you’re at the end of your milestone and you’re swiftly picking away at your beautiful design into a shallow imitation of its former glory.\n\n#### Solution\n\nHowever, there is a simple way to keep this from happening: “[Iteration](https://handbook.gitlab.com/handbook/values/#iteration) is a team sport”. The designer shouldn’t be the only person on the team compromising for the sake of MVC. With ThinkBIG, you have multiple chances to bring engineering into the fold early and with the full vision in mind. This means devs are part of the conversation from the start, able to craft a valuable iteration and your designs become the conversation piece of deciding “What can we do next to deliver an amazing experience to our customers?”\n\n### 4) We’re working so hard but not getting anywhere\n\n#### Signal\n\nWorking iteratively is incredibly powerful and at GitLab, we can see the value of an iterative approach. 
We’re able to change our priorities at a moment’s notice and the work we actually have to deliver is reasonable and manageable while continuously delivering new value to customers. There is, however, a small drawback: When you’re only focusing on the step immediately in front of you, it’s easy to get lost along the way.\n\n#### Solution\nAs a designer, we have a unique opportunity to be the navigator for our teams. Using the ThinkBIG model, designers are empowered to hold responsibility for the Vision. From here, the Product Manager/Product Designer relationship becomes a balance between the vision and the strategy. Designs based on the large vision are used to keep the team on track for hitting the targets that bring value to customers while allowing for collaboration with the rest of the team on what tiny steps we take to get there.\n\n### 5) Engineers are reworking a lot\n\n#### Signal\n\nMy engineer and I are excited to work on a new effort. I’ve designed the first iteration and successfully passed it to them.  While they’re building, I’m working on the design for the next iteration. A few weeks later the new changes are merged, the next iteration designs are ready, and customers are already seeing value. Your engineer looks at the next iteration and painfully mutters “Well, I’ll have to rewrite what I wrote the last milestone to account for this.”\n\n#### Solution\n\nIn a highly iterative development lifecycle, it’s not uncommon to have to rework things as the product evolves. However, it shouldn’t be happening every time. With ThinkBIG, engineers are informed of the long-term goal as well as the short-term MVC iteration. This extra context allows them to deliver the iteration while architecting their code in an informed way of where it will go.\n\n### Start Thinking BIGGER!\n\nAre some of these signals sounding familiar? Then switching your design style to ThinkBIG may be for you! 
The simplest way to make this change is to move iteration breakdown to **after** the design phase. It immediately shows engineers where we want to go as a product or feature, opens the implementation breakdown (MVC) conversation to the whole team, and provides incredibly valuable insight to everyone on the team. This model of working helps designers be more efficient, deliver results, and foster a tight collaboration with the broader team. To see this process in action, check out a [Package ThinkBIG around the dependency proxy design and research](https://www.youtube.com/watch?v=LXFu6oDxhsw). For more information, check out the GitLab Handbook on [ThinkBIG](https://about.gitlab.com/handbook/product/ux/thinkbig/) to learn more.\n",[928,950,951,952,9],"design","inside GitLab","remote work",{"slug":954,"featured":6,"template":703},"five-signs-you-should-think-bigger","content:en-us:blog:five-signs-you-should-think-bigger.yml","Five Signs You Should Think Bigger","en-us/blog/five-signs-you-should-think-bigger.yml","en-us/blog/five-signs-you-should-think-bigger",{"_path":960,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":961,"content":967,"config":975,"_id":977,"_type":14,"title":978,"_source":16,"_file":979,"_stem":980,"_extension":19},"/en-us/blog/gitlab-apis-ci",{"title":962,"description":963,"ogTitle":962,"ogDescription":963,"noIndex":6,"ogImage":964,"ogUrl":965,"ogSiteName":686,"ogType":687,"canonicalUrls":965,"schema":966},"Using Gitlab APIs: Real Use Case Scenario","Learn about how GitLab CI and APIs can help you automate bulk tasks","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681037/Blog/Hero%20Images/gitlabapi-cover.jpg","https://about.gitlab.com/blog/gitlab-apis-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using Gitlab APIs: Real Use Case Scenario\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        
\"datePublished\": \"2020-01-22\",\n      }",{"title":962,"description":963,"authors":968,"heroImage":964,"date":970,"body":971,"category":972,"tags":973},[969],"William Arias","2020-01-22","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nGitlab APIs along with  Continuous Integration can be very helpful when executing certain bulk tasks.\n\nConsider this requirement derived from a real-world scenario\n\n* Company XYZ possess several repositories that have been organized under a Gitlab group\n\n![group](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/gitlab-group.png){: .shadow.medium.center.wrap-text}\n\n* The company needs to test the building of projects in bulk using new  hardware (Runner with different CPU Architecture) that will bring down  execution costs, whenever the build in each of the projects fails an issue must be  automatically created.\n\n![runner](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/runner.png){: .shadow.medium.center.wrap-text}\n\n* Lastly, all the issues that were automatically created whenever a project built failed,  should be collected in bulk and reported back to a Wiki\n\n![pipelineview](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/3-pipelineview-collect-issues.png){: .shadow.medium.center.wrap-text}\n\nHow do we test the building of those several projects and create issues and reports about its execution automatically? Let's use Gitlab CI and  APIs.\n\n\n## 1. Company groups and projects Structure\n\nIn this case, the set of projects were grouped under a single group, following this structure:\n\n![groupview](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/4-group-view-api-blog.png){: .shadow.medium.center.wrap-text}\n\n## 2. 
Automatically creating Issues leveraging Gitlab CI and API\n\nIn order to create issues using Gitlab API we will use the Issues API an example of that  can use the following cURL command:\n\n![curl](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/5-create-issue-api-gitlabapi.png){: .shadow.medium.center.wrap-text}\n\nThe API Call: \n\n `curl --request POST --header \"PRIVATE-TOKEN:$ISSUE_API_KEY\" \"https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/issues?title=Build%20Failed&labels=ARMbuild&description=Project%20Tests%20Failed%20on%20ARM\"`\n\n The previous Gitlab API call can be configured to be executed whenever a job fails. Let's dissect this API Call to understand its parameters so you can potentially customize it  for your project environment\n\n* Base URL:  https://gitlab.com/api/v4/projects\n* Project where we want to add the issue:  $CI_PROJECT_ID Notice this ID is unique and corresponds to the project where the CI/CD pipeline runs \n* Issues: Endpoint we use to tell Gitlab we want to add an issue to the project\n* Parameters:\n  * Title: How we want the issue to be titled\n  * Labels: Helpful to group issues by label or type, They help you organize and tag your work so you can track and find the work items you’re interested in.\n  * Description: Field to explain the nature of the issue if needed\n\n The request is of type POST, because we are sending data to our receiver service.  
For this call to be successful it requires  authentication for which we will use *PRIVATE-TOKEN* header\n\n The private token can be generated by following these steps [How-to-generate-token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html)\n\nWhen we execute the above API call, we create an issue in the corresponding Gitlab project\n![issueproject](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/5-issues-created.png){: .shadow.medium.center.wrap-text}\n\nGreat, so once the multi-project pipeline has run,  each of the projects that failed in its building stage will create an issue warning us to double check why it failed while documenting the failure and labeling it for future follow-up.\n![multiproject](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/7.1-multiproject-pipeline-gitlabapi.png){: .shadow.medium.center.wrap-text}\n\n## 3. Automatically collecting all the issues from Gitlab Group\n\nThanks to Gitlab CI and APIs we can collect all the issues created and report them back, by adding this script  in  your pipeline stage\n\n![collectissues](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/7-collecting-issues-apiblog.png){: .shadow.medium.center.wrap-text}\n\nLet's dissect again the main API call:\n\n`curl --header \"PRIVATE-TOKEN:$GROUP_ISSUE_LIST\" \"https://gitlab.com/api/v4/groups/9123625/issues`\n\n* Base url: https://gitlab.com/api/v4/\n* Group resource: /groups/9123625\n* Issues resources: /issues \n\nThe previous API call will return a json object, the one we will save as an artifact when executing our pipeline job. Notice this artifact is created and saved automatically by Gitlab CI\nGreat! So far we created issues per failed project, and collected them all in one single step\n\n\n## 4. 
Reporting back to Wiki Project \n\n![wikijob](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/8-reportwiki-gitlab-api.png){: .shadow.medium.center.wrap-text}\n\nFor convenience, the json report was transformed to markdown, then using the following script we publish the markdown report to the Wiki of an specific project\n\n`curl --data \"format=markdown&title=$CI_JOB_ID&content=$results\" --header \"PRIVATE-TOKEN:$API_WIKI\" \"https://gitlab.com/api/v4/projects/20852684/wikis\"`\n\nLet's breakdown again the API call:\n\n* Base url: https://gitlab.com/api/v4/\n* Project resource ID : /projects/20852684\n* Wiki resource: /wiki\n* Parameters: \n  * Data format: markdown. We want to publish a markdown table\n  * Title: Title of the Wiki entry, we use the environment variable corresponding to the CI_JOB that was executed\n  * Content: The markdown table generated with the issues collection\n\n Finally, when the last API call has been executed, this is an example of the output we can get: \n\n ![report](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/10-test-report-gitlabapi.png){: .shadow.medium.center.wrap-text}\n\nLet's recapitulate, by using Gitlab CI in a multi project pipeline along with APIs we were able to test and report automatically x-number of projects and its compatibility with a new hardware CPU architecture. 
More information about the APIs utilized for this project here:\n\n[Issues-api](https://docs.gitlab.com/ee/api/issues.html#new-issue)\n[Collect-group-issues](https://docs.gitlab.com/ee/api/issues.html#list-group-issues)\n[WikisAPI](https://docs.gitlab.com/ee/api/wikis.html)\n\n[Multi-project-pipeline](https://about.gitlab.com/blog/cross-project-pipeline/)\n\n\nIf you’d like to see GitLab’s API in action, watch this [video](https://youtu.be/zdBwMHARkU0?t=469).\n\nFor more information, visit [LEARN@GITLAB](https://about.gitlab.com/learn/).\n\nCover image credit:\n\nCover image by [Mohanan](https://unsplash.com/photos/yQpAaMsQzYE) on [Unsplash](https://unsplash.com)\n{: .note}\n\n","unfiltered",[745,9,723,974],"demo",{"slug":976,"featured":6,"template":703},"gitlab-apis-ci","content:en-us:blog:gitlab-apis-ci.yml","Gitlab Apis Ci","en-us/blog/gitlab-apis-ci.yml","en-us/blog/gitlab-apis-ci",{"_path":982,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":983,"content":989,"config":994,"_id":996,"_type":14,"title":997,"_source":16,"_file":998,"_stem":999,"_extension":19},"/en-us/blog/gitlab-at-aws-re-invent-2023",{"title":984,"description":985,"ogTitle":984,"ogDescription":985,"noIndex":6,"ogImage":986,"ogUrl":987,"ogSiteName":686,"ogType":687,"canonicalUrls":987,"schema":988},"GitLab at AWS re:Invent 2023","GitLab and AWS have streamlined development and security for DevSecOps teams. 
Learn how in lightning talks, sessions, live demos, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664472/Blog/Hero%20Images/gitlabflatlogomap.png","https://about.gitlab.com/blog/gitlab-at-aws-re-invent-2023","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab at AWS re:Invent 2023\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2023-11-22\",\n      }",{"title":984,"description":985,"authors":990,"heroImage":986,"date":991,"body":992,"category":786,"tags":993},[718],"2023-11-22","GitLab will be at AWS re:Invent 2023 in Las Vegas, November 27 to December 1, to demonstrate how the GitLab DevSecOps Platform on Amazon Web Services delivers secure, enterprise-grade AI throughout the software development lifecycle. Stop by Booth #1152 in the Security Zone for [lightning talks, live demos, customer sessions, and more](https://about.gitlab.com/events/aws-reinvent/) all week. \n\nMake sure to [check out our event page and calendar](https://about.gitlab.com/events/aws-reinvent/) to find sessions, locations, opportunities to meet with GitLab, and more (note, they do not appear in the AWS event app). Some sessions will also be available on-demand after the conference.\n\nHere are some of the lightning talks GitLab will be presenting:\n\n**Frictionless developer experience: Using human habits to accelerate DevSecOps maturity and increase joy**\n\nGitLab’s long-standing approach to building DevSecOps pipelines aligns with AWS’ new emphasis on frictionless developer experiences. Join this session to learn how the GitLab DevSecOps platform represents a true “shift left” by empowering and streamlining developers’ normal workflow.\n\n[Add to calendar - Nov. 
30](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=654966e4f2269af78f005ba1)\n\n**New integrations and solutions for using GitLab and AWS together**\n\nIn recent months, AWS and GitLab have built new service integrations for source control, CI, and CD. You'll learn how GitLab integrates with AWS CodeStar Connections, Amazon CodeGuru, OpenID, and more, as well as development and deployment solutions for Serverless.com Framework and Terraform to AWS.\n\nAdd to calendar\n* [Nov. 28](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=654144eef011a50313dc7113)\n* [Nov. 29](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=654942dfef8fa23b213f0eca)\n* [Nov. 30](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=65494b66a0b8daf9ca33a386)\n\n**Secure and assured Terraform development using GitLab security scanning policies and managed DevOps environments**\n\nThis lightning talk discusses and demonstrates working example code that extends GitLab's existing support for Terraform State management with full lifecycle-managed DevOps environments for merge requests, long-lived pre-production environments, production environments, and one-off experimental environments. Whether you are developing infrastructure as code specifically or embedding it with application code for the sake of easy environment support, this lightning talk has something to offer you.\n\n[Add to calendar - Nov. 28](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=654961043165b6f013635639)\n\n**Secure GitLab CD pipelines to AWS with OpenID Federation, OIDC, and JWT**\n\nGitLab has three ways to authenticate and authorize your CI and CD workloads into AWS environments. Adding and refining OpenID provides the ability to use an industry standard, which is the most advanced of the three. Join us to learn how to accomplish this highly secure integration option.\n\n[Add to calendar - Nov. 
29](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=6549580763edc0caa46ea061)\n\n**Security intelligence through full integration of Amazon CodeGuru Security into GitLab**\n\nAWS CodeGuru Security has created a full integration that enables you to view scanner results in GitLab merge requests and security dashboards so you can use them to block merges in security policy merge approval rules — just like GitLab’s integrated security scanning results. Attend this lightning talk to learn more.\n\n[Add to calendar - Nov. 28](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=654953f963edc0cdbf6e8c6f)\n\n## GitLab and AWS: The year in review\nThroughout 2023, GitLab and AWS announced partner designations and new service integrations that enable development, security, and operations teams to collaborate more easily, to take advantage of AI at all stages, and to flexibly scale infrastructure to create and deploy secure software faster. \n\n#### AWS recognized GitLab as a partner in several categories\n\n- **AWS DevSecOps Partner Competency Specialty:** This specialty denotes that GitLab makes it easy for customers to [integrate security across every stage](https://about.gitlab.com/blog/aws-devsecops-competency-partner/) of the development and delivery cycles, providing rapid and contextual feedback to development, security, and ops teams.\n\n-  **Amazon Linux 2023 Ready Partner:** Amazon Linux 2023-specific RPM packages are available for GitLab, starting at [Version 16.3.0](https://docs.gitlab.com/ee/administration/package_information/supported_os.html) and for GitLab Runner. Official GitLab support for Amazon Linux 2023 also means GitLab builds the RPM packages and hosts them on our packages infrastructure, Graviton (arm64) and amd64 architectures are both supported. To install GitLab on Amazon Linux 2023, [follow these instructions](https://about.gitlab.com/install/#amazonlinux-2023). 
\n\nLearn more about [GitLab's AWS partner designations](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_partner_designations.html).\n\n#### AWS CodeStar Connections opens up a host of AWS service integrations\n\nAWS recently completed the integration of GitLab.com SaaS into its AWS CodeStar Connections service. This service is a foundational, shared service used by many other AWS services to connect to Git repositories outside of AWS. As a result, GitLab was immediately available to AWS services once this integration was completed.\n\nGitLab is available at CodeStar Connections throughout many AWS services for connectivity to Git. In addition, using a CodeStar Connection for an AWS CodePipeline opens up other service integrations that primarily rely on CodePipeline as their key integration point.\n\nHere is a visual map of the integrations that are currently available:\n\n![CodeStar Connections integrations](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676883/Blog/Content%20Images/gitlabcodestarconnectionsintegration.png)\n\n#### AI customization with AWS CodeWhisperer\n[AWS CodeWhisperer's customization capability](https://aws.amazon.com/blogs/aws/new-customization-capability-in-amazon-codewhisperer-generates-even-better-suggestions-preview/) leverages CodeSuite Connections, allowing generative code suggestions to take into account the libraries and design patterns of your current application when suggesting new code. It does so with no ingestion of your code into the general LMM creation. AWS CodeWhisperer can be pointed to a GitLab repository. \n\n#### AWS CodeGuru and GitLab Ultimate secure scanning integration\nThe AWS CodeGuru team [built an integration with GitLab CI](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html#scm-integrations) as part of their build secure scanning capabilities. 
[Amazon CodeGuru Security findings](https://docs.aws.amazon.com/codeguru/latest/security-ug/get-started-gitlab.html) use GitLab’s vulnerability report formatting, enabling exports to integrate directly into GitLab Ultimate security features such as merge request views, security dashboards, and in-context remediation solutions and training. Importantly, it allows these findings to be addressed by GitLab Security Policy Merge Approval Rules. \n\n#### GitLab's new single-tenant Saas option sits atop AWS\nEarlier this year, GitLab launched [GitLab Dedicated](https://docs.gitlab.com/ee/subscriptions/gitlab_dedicated/), a single-tenancy solution for organizations in highly regulated industries that have complex regulatory, compliance, and data residency requirements. The fully isolated SaaS offering is hosted and managed by GitLab and deployed on AWS in a cloud region of the customer's choosing. [Learn more about how GitLab built GitLab Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/).\n\n## Plan your week at AWS re:Invent\nFill your calendar with GitLab at AWS re:Invent! 
[Check out our calendar](https://about.gitlab.com/events/aws-reinvent/) of sponsored sessions, lightning talks, live demos, and more throughout the week at Booth #1152.\n",[9,810,496,284],{"slug":995,"featured":91,"template":703},"gitlab-at-aws-re-invent-2023","content:en-us:blog:gitlab-at-aws-re-invent-2023.yml","Gitlab At Aws Re Invent 2023","en-us/blog/gitlab-at-aws-re-invent-2023.yml","en-us/blog/gitlab-at-aws-re-invent-2023",{"_path":1001,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1002,"content":1008,"config":1015,"_id":1017,"_type":14,"title":1018,"_source":16,"_file":1019,"_stem":1020,"_extension":19},"/en-us/blog/gitlab-cnh-for-50k-users",{"title":1003,"description":1004,"ogTitle":1003,"ogDescription":1004,"noIndex":6,"ogImage":1005,"ogUrl":1006,"ogSiteName":686,"ogType":687,"canonicalUrls":1006,"schema":1007},"Ready-To-Run GitLab for 50,000 users with AWS Quick Start","If you have two hours, you can deploy a GitLab instance on EKS for any number of users. All it takes is about 14 clicks! 
Here's what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680619/Blog/Hero%20Images/construction-blueprint.jpg","https://about.gitlab.com/blog/gitlab-cnh-for-50k-users","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to provision Ready-To-Run GitLab for 50,000 users with the AWS Quick Start\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2021-10-06\",\n      }",{"title":1009,"description":1004,"authors":1010,"heroImage":1005,"date":1011,"body":1012,"category":721,"tags":1013},"How to provision Ready-To-Run GitLab for 50,000 users with the AWS Quick Start",[718],"2021-10-06","\n\nIf you have spent time reviewing GitLab Reference Architectures, you may have noticed the flexibility of the GitLab codebase; it's possible to support a broad range of implementations from a single box for under one hundred users to horizontal hyper-scaled setups for 50,000 or more.\n\nScaling to massive sizes requires the services within GitLab to be broken out into dedicated compute and storage layers so they can each expand cost effectively based on high loading and an organization's specific usage patterns.\n\nThose who provision large scale systems on the cloud generally turn to [Infrastructure as Code (IaC)](/direction/delivery/infrastructure_as_code/) to ensure consistency and to allow easy setup of pre-production environments for the target system. Until recently, GitLab implementers have had to craft this code from scratch.\n\nNow, thanks to our investments in IaC tooling, GitLab customers now have an entire implementation eco-system to work from. 
These efforts include the [GitLab Environment Toolkit (GET)](/blog/why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale/) and the AWS Quick Start for cloud native hybrid on EKS.\n\nThis post will focus on the AWS Quick Start - but it's worth noting both initiatives are open source - so you can consume, customize and contribute!\n\n## What is an AWS Quick Start?\n\nAWS Quick Starts are much more than the \"getting started\" feeling implied by their name. As a part of the Quick Start program, AWS ensures that each one reflects the best practices of the software vendor (GitLab in this case) as well as AWS' own well-architected standards. They reflects a high level of technical partnership and technical assurance by both companies. The Quick Start program also includes a hard requirement for high availability of every component of the deployed application. Even bastion hosts are run in an autoscaling group so they will respawn if they unexpectedly terminate. Quick Starts are also intended to create a \"Ready-to-Run\" implementation whenever possible. Quick Starts are open source and have a dependency model which allows GitLab to reuse the existing EKS Quick Start as a foundation.\n\n## What Is the GitLab AWS implementation pattern for cloud native hybrid on EKS?\n\nGitLab has Reference Architectures that determine how to install GitLab for various user counts. Each Reference Architecture has a section on cloud native hybrid to show how to configure it and the advised number of vCPUs and memory for the target user count. Each one is similar to blueprints for a building. 
\n\nThe AWS implementation pattern for cloud native hybrid on EKS builds on this information by:\n\n- Showing how to maximize the usage of AWS PaaS with assurance of GitLab Reference Architecture compliance.\n- Showing a tally of total cluster resources as specified by the Rreference Architecture.\n- Presenting a bill of materials listing:\n\n  - EKS node instance type (sizing) and count as tested.\n  - RDS PostgreSQL and Redis Elasticache instance types (sizing) and count as tested.\n  - Gitaly Cluster instance types (sizing) and count as tested.\n  \n- [GPT testing](https://gitlab.com/gitlab-org/quality/performance) results for a system configured according to the bill of materials. This can be used to compare back to the reference architectures and to your own configuration that is based on the bill of materials.\n\nSo while the Reference Architectures are like building blueprints, the AWS implementation pattern for cloud native hybrid on EKS intends to be like a bBill of mterials (shopping list) you can plug directly into the parameters of the AWS Quick Start or the GitLab Environment Toolkit to build GitLab on EKS with a pre-tested configuration.\n\n## \"Deploy Now\" links\n\nWithin each AWS implementation pattern for cloud native hybrid on EKS you will find some \"Deploy Now\" links.  These make the AWS Quick Start even easier to use by presetting all the instance types and instance counts based on the bill of materials for the user size.  This reduces the number of fields you need to fill out on the Quick Start form. 
The Deploy Now links are how we were able to reduce the number of clicks to deploy for 50,000 users to just 14.\n\nThe Quick Start takes about two hours to deploy regardless of the size of instance you choose.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/s3ZaBXYG8nc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## How you can deploy GitLab for any number of users in a couple of hours\n\nThe YouTube playlist [Learning to provision the AWS Quick Start for GitLab on EKS](https://youtube.com/playlist?list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5) walks you through:\n\n1. [GitLab Reference Architectures, performance testing, cloud native hybrid and what is Gitaly](https://www.youtube.com/watch?v=1TYLv2xLkZY&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=1&t=399s) (11mins)\n2. [An overview of GitLab AWS implementation patterns](https://www.youtube.com/watch?v=_x3I1aq7fog&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=2) (13mins)\n3. [An overview of AWS Quick Start for cloud native hybrid on EKS](https://www.youtube.com/watch?v=XHg6m6fJjRY&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=3&t=8s) (9mins)\n4. [Provisioning Ready-To-Run GitLab for 50,000 users in 14 clicks and a long lunch)](https://www.youtube.com/watch?v=s3ZaBXYG8nc&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=4&t=798s) (21mins) - same as above video.\n5. 
[Easy performance testing an AWS Quick Start-provisioned GitLab cloud native hybrid instance](https://www.youtube.com/watch?v=QpkF1vXXCjk&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=5&t=510s) (32mins)\n\nIf you would like help getting started with Gitlab instance provisioning on AWS, please contact your GitLab account team or reach out to [GitLab Sales](https://about.gitlab.com/sales/)!\n",[1014,233,9],"cloud native",{"slug":1016,"featured":6,"template":703},"gitlab-cnh-for-50k-users","content:en-us:blog:gitlab-cnh-for-50k-users.yml","Gitlab Cnh For 50k Users","en-us/blog/gitlab-cnh-for-50k-users.yml","en-us/blog/gitlab-cnh-for-50k-users",{"_path":1022,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1023,"content":1029,"config":1034,"_id":1036,"_type":14,"title":1037,"_source":16,"_file":1038,"_stem":1039,"_extension":19},"/en-us/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes",{"title":1024,"description":1025,"ogTitle":1024,"ogDescription":1025,"noIndex":6,"ogImage":1026,"ogUrl":1027,"ogSiteName":686,"ogType":687,"canonicalUrls":1027,"schema":1028},"GitLab Duo + Amazon Q: Transform ideas into code in minutes","The new GitLab Duo with Amazon Q integration analyzes your issue descriptions and automatically generates complete working code solutions, accelerating development workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097127/Blog/Hero%20Images/Blog/Hero%20Images/Screenshot%202024-11-27%20at%204.55.28%E2%80%AFPM_4VVz6DgGBOvbGY8BUmd068_1750097126673.png","https://about.gitlab.com/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo + Amazon Q: Transform ideas into code in minutes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-04-28\",\n      
}",{"title":1024,"description":1025,"authors":1030,"heroImage":1026,"date":1031,"body":1032,"category":694,"tags":1033},[691],"2025-04-28","Have you ever spent days or even weeks converting a complex issue into working code? We've all been there. You start with a solid idea and a clear set of requirements, but the path from that initial concept to deployable code can be frustratingly long. Your productivity gets bogged down in implementation details, and projects that should move quickly end up dragging on.\n\nThis is where the power of [agentic AI](https://about.gitlab.com/topics/agentic-ai/) capabilities comes in. [GitLab Duo with Amazon Q](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), which combines the comprehensive AI-powered DevSecOps platform with the deepest set of cloud computing capabilities, is designed to dramatically accelerate your application development process, all within your familiar GitLab workflow. By streamlining your path from idea to deployment, this powerful integration can propose implementation solutions based on your issue descriptions alone – transforming what used to take days into something that happens in minutes.\n\n## How it works: From issue to working code\n\nLet's walk through how this agentic AI feature works in practice. Imagine you're a developer tasked with creating a mortgage calculator application. Here's how GitLab Duo with Amazon Q helps you get it done:\n\n1. **Create an issue with detailed requirements:** Start by creating a standard [GitLab issue](https://docs.gitlab.com/user/project/issues/). In the description, you'll provide a comprehensive list of requirements that your service needs to meet. This becomes the blueprint for your solution.\n\n2. **Invoke Amazon Q with a quick action:** Once your issue is created, simply add a comment with a quick action, “/q dev”, to invoke Amazon Q. This is where the magic begins. \n\n3. 
**Let AI generate your implementation:** GitLab Duo with Amazon Q analyzes the issue description you've provided and the context of your source code, then autonomously generates code that meets all your stated requirements. It doesn't stop there – it actually commits those changes in a merge request, ready for your review.\n\n![GitLab Duo  with Amazon Q activity pop-up screenshot](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097156/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097156018.png)\n\n4. **Review the generated application**: Navigate into the merge request to review the generated code. You can verify that all your requirements have been met and make any necessary adjustments.\n\n5. **Test the proposed application**: Finally, check that the application runs successfully. With minimal effort on your part, you now have working code that implements your original requirements.\n\n## Improve your development process\n\nGitLab Duo with Amazon Q completely transforms this process, including dramatically decreasing the time it takes to carry out complex developer tasks, through intelligent automation. By leveraging an agentic AI approach, you can accelerate your path from idea to deployment, freeing development teams to focus on more strategic work.\n\nWith GitLab Duo and Amazon Q, you'll develop software faster, more efficiently, and with less manual coding effort. This integration helps you:\n\n* **Save valuable development time** by automating implementation based on requirements  \n* **Maintain consistency** in code generation across your projects  \n* **Reduce the cognitive load** of translating requirements into working code  \n* **Accelerate your release cycles** by removing implementation bottlenecks  \n* **Focus your expertise** on reviewing and optimizing, rather than writing boilerplate code\n\nReady to see GitLab Duo with Amazon Q in action? 
Watch our demo video to discover how you can transform your development workflow today.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/jxxzNst3jpo?si=j_LQdZhUnwqoQEst\" title=\"GitLab Duo with Amazon Q demo video for dev workflow\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n> To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).\n\n## GitLab Duo with Amazon Q resources\n\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/)\n- [GitLab and AWS partner page](https://about.gitlab.com/partners/technology-partners/aws/)\n- [GitLab Duo with Amazon Q documentation](https://docs.gitlab.com/user/duo_amazon_q/)",[696,9,700,496,698,233],{"slug":1035,"featured":91,"template":703},"gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes","content:en-us:blog:gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes.yml","Gitlab Duo Amazon Q Transform Ideas Into Code In Minutes","en-us/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes.yml","en-us/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes",{"_path":1041,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1042,"content":1046,"config":1054,"_id":1056,"_type":14,"title":1057,"_source":16,"_file":1058,"_stem":1059,"_extension":19},"/en-us/blog/gitlab-duo-self-hosted-models-on-aws-bedrock",{"config":1043,"title":1044,"description":1045},{"noIndex":6},"Own your AI: Self-Hosted GitLab Duo models with AWS Bedrock","Discover how to leverage AI while maintaining control over your data, infrastructure, and security 
posture.",{"title":1044,"description":1045,"authors":1047,"heroImage":1050,"body":1051,"category":694,"tags":1052,"date":1053},[1048,1049],"Chloe Cartron","Olivier Dupré","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098682/Blog/Hero%20Images/Blog/Hero%20Images/duo-blog-post_1Cy89R1pY8OMwyrgSB525O_1750098682075.png","As organizations adopt AI capabilities to accelerate their software\ndevelopment lifecycle, they often face a critical challenge: how to leverage\nAI while maintaining control over their data, infrastructure, and security\nposture. This is where [GitLab Duo\nSelf-Hosted](https://about.gitlab.com/gitlab-duo/) provides a compelling\nsolution.\n\nIn this article, we'll walk through the implementation of GitLab Duo Self-Hosted models. This comprehensive guide helps organizations needing to meet strict data sovereignty requirements while still leveraging AI-powered development. The focus is on using models hosted on AWS Bedrock rather than setting up an [LLM](https://about.gitlab.com/blog/what-is-a-large-language-model-llm/) serving solution like vLLM. 
However, the methodology can be applied to models running in your own data center if you have the necessary capabilities.\n\n## Why GitLab Duo Self-Hosted?\n\nGitLab Duo Self-Hosted allows you to deploy GitLab's AI capabilities entirely within your own infrastructure, whether that's on-premises, in a private cloud, or within your secure environment.\n\n\nKey benefits include:\n\n* **Complete Data Privacy and Control:** Keep sensitive code and intellectual property within your security perimeter, ensuring no data leaves your environment.\n\n* **Model Flexibility:** Choose from a variety of models tailored to your specific performance needs and use cases, including Anthropic Claude, Meta Llama, Mistral families, and OpenAI GPT families.\n\n* **Compliance Adherence:** Meet regulatory requirements in highly regulated industries where data must remain within specific geographical boundaries.\n\n* **Customization:** Configure which GitLab Duo features use specific models to optimize performance and cost.\n\n* **Deployment Flexibility:** Deploy in fully air-gapped environments, on-premises, or in secure cloud environments.\n\n\n## Architecture overview\n\nThe GitLab Duo Self-Hosted solution consists of three core components:\n\n1. **Self-Managed GitLab instance**: Your existing GitLab instance where users interact with GitLab Duo features.\n\n2. **AI Gateway**: A service that routes requests between GitLab and your chosen LLM backend.\n\n3. 
**LLM backend**: The actual AI model service, which, in this article, will be AWS Bedrock.\n\n**Note:** You can use [another serving platform](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/supported_llm_serving_platforms/) if you are running on-premises or using another cloud provider.\n\n\n![Air-gapped network flow chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754422792/jws4h2kakflfrczftypj.png)\n\n\n## Prerequisites\n\nBefore we begin, you'll need:\n\n* A GitLab Premium or Ultimate instance (Version 17.10 or later)  \n\n  * We strongly recommend using the latest version of GitLab as we continuously deliver new features.\n\n* A GitLab Duo Enterprise add-on license  \n\n* AWS account with access to Bedrock models *or your API key and credentials needed to query your LLM Serving model*\n\n\n**Note:** If you aren't a GitLab customer yet, you can [sign up for a free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/), which includes GitLab Duo Enterprise.\n\n## Implementation steps\n\n**1. Install the AI Gateway**\n\n\nThe AI Gateway is the component that routes requests between your GitLab instance and your LLM serving infrastructure — here that is AWS Bedrock. It can run in a Docker image. Follow the instructions from our [installation documentation](https://docs.gitlab.com/install/install_ai_gateway/) to get started. \n\n\nFor this example, using AWS Bedrock, you also must pass the AWS Key ID and Secret Access Key along with the AWS region.  
\n\n\n```yaml\n\nAIGW_TAG=self-hosted-v18.1.2-ee`\n\ndocker run -d -p 5052:5052 \\\n\n  -e AIGW_GITLAB_URL=\u003Cyour_gitlab_instance> \\\n\n  -e AIGW_GITLAB_API_URL=https://\u003Cyour_gitlab_domain>/api/v4/ \\\n\n  -e AWS_ACCESS_KEY_ID=$AWS_KEY_ID\n\n  -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \\\n\n  -e AWS_REGION_NAME=$AWS_REGION_NAME \\\n\nregistry.gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/model-gateway:$AIGW_TAG \\\n\n```\n\nHere is the [`AIGW_TAG` list](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/tags).\n\n\nIn this example we use Docker, but it is also possible to use the Helm chart. Refer to [the installation documentation](https://docs.gitlab.com/install/install_ai_gateway/#install-by-using-helm-chart) for more information.\n\n\n**2. Configure GitLab to access the AI Gateway**\n\n![Configure GitLab to access the AI Gateway](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754422792/xj9kvljkqsacpsw41k4a.png)\n\nNow that the AI gateway is running, you need to configure your GitLab instance to use it.\n\n  - On the left sidebar, at the bottom, select **Admin**.  \n\n  - Select **GitLab Duo**.  \n\n  - In the GitLab Duo section, select **Change configuration**.  \n\n  - Under Local AI Gateway URL, enter the URL for your AI gateway and port for the container (e.g., `https://ai-gateway.example.com:5052`).\n  \n  - Select **Save changes**.\n\n\n**3. Access models from AWS Bedrock** \n\n\nNext, you will need to request access to the available models on AWS Bedrock. \n\n\n  - Navigate to your AWS account and Bedrock.  \n\n  - Under **Model access**, select the models you want to use and follow the instructions to gain access. \n\n\nYou can find more information in the [AWS Bedrock documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html).\n\n\n**4. 
Configure the self-hosted model**\n\nNow, let's configure a specific AWS Bedrock model for use with GitLab Duo.\n\n![Add the self-hosted model screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754422792/chrlgdvxwdetcszptsav.png)\n\n  - On the left sidebar, at the bottom, select **Admin**.  \n\n  - Select **GitLab Duo Self-Hosted**.  \n\n  - Select **Add self-hosted model**.\n  \n  - Fill in the fields:  \n    * **Deployment name**: A name to identify this model configuration (e.g., \"Mixtral 8x7B\")  \n    * **Platform:** Choose AWS Bedrock  \n    * **Model family:** Select a model, for example here \"Mixtral\"  \n    * **Model identifier:** bedrock/`model-identifier` [from the supported list](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/supported_models_and_hardware_requirements/).\n    \n  - Select **Create self-hosted model**.\n\n\n**5. Configure GitLab Duo features to use your self-hosted model**\n\n\nAfter configuring the model, assign it to specific GitLab Duo features.\n\n![Screen to configure self-hosted model features](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754422793/an2i9s2p9cja2xx27g4z.png)\n\n  - On the left sidebar, at the bottom, select **Admin**.  \n\n  - Select **GitLab Duo Self-Hosted**.  \n\n  - Select the **AI-powered features** tab.  \n\n  - For each feature (e.g., Code Suggestions, GitLab Duo Chat) and sub-feature (e.g., Code Generation, Explain Code), select the model you just configured from the dropdown menu.\n\n\nFor example, you might assign Mixtral 8x7B to Code Generation tasks and Claude 3 Sonnet to the GitLab Duo Chat feature.\n\nCheck out the [requirements documentation](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/supported_models_and_hardware_requirements/) to select the right model for the use case from the models compatibility list per Duo feature. 
\n\n## Verifying your setup\n\nTo ensure that your GitLab Duo Self-Hosted implementation with AWS Bedrock is working correctly, perform these verification steps:\n\n**1. Run the health check**\n\nAfter checking that your model is up and running, return to the GitLab Duo section from the Admin page and click on **Run health check**. This will verify that:   \n\n* The AI gateway URL is properly configured.  \n\n* Your instance can connect to the AI gateway.  \n\n* The Duo license is activated.   \n\n* A model is assigned to Code Suggestions — *as this is the model used to test the connection.*\n\n\n![Running the health check](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754422793/yffw21yhjpwummw1ffsw.png)\n\n\nIf the health check reports issues, refer to the [troubleshooting guide](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/troubleshooting/) for common errors. \n\n\n**2. Test GitLab Duo features**\n\nTry out a few GitLab Duo features to ensure they're working:\n\n* In the UI, open GitLab Duo Chat and ask it a question.  \n\n* Open the web IDE  \n  * Create a new code file and see if Code Suggestions appears.  \n  * Select a code snippet and use the `/explain` command to receive an explanation from Duo Chat. \n\n**3. Check AI Gateway logs**\n\nReview the AI gateway logs to see the requests coming to the gateway from the selected model:\n\nIn your terminal, run:\n\n```shell\ndocker logs \u003Cai-gateway-container-id>\n```\n\nOptional: In AWS, you can [activate CloudWatch and S3 as log destinations](https://docs.aws.amazon.com/bedrock/latest/userguide/model-invocation-logging.html). 
Doing so would enable you to see all your requests, prompts, and answers in CloudWatch.\n\n**Warning:** Keep in mind that activating these logs in AWS logs user data, which may not comply with privacy rules.\n\nAnd here you have full access to using GitLab Duo's AI features across the platform while retaining complete control over the data flow operating within the secure AWS cloud.\n\n## Next steps\n\n### Selecting the right model for each use case\n\nThe GitLab team actively tests each model's performance for each feature and provides [tier ranking of model's performance and suitability depending on the functionality:](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/supported_models_and_hardware_requirements/#supported-models)\n\n- Fully compatible: The model can likely handle the feature without any loss of quality.  \n\n- Largely compatible: The model supports the feature, but there might be compromises or limitations.  \n\n- Not compatible: The model is unsuitable for the feature, likely resulting in significant quality loss or performance issues.\n\nAs of this writing, most GitLab Duo features can be configured with Self Hosted. The complete availability overview is available in the [documentation](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/#supported-gitlab-duo-features). \n\n### Going beyond AWS Bedrock\n\nWhile this guide focuses on AWS Bedrock integration, GitLab Duo Self-Hosted supports multiple deployment options:\n\n1. [On-premises with vLLM](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/supported_llm_serving_platforms/#vllm): Run models locally with vLLM for fully air-gapped environments.  \n\n2. 
[Azure OpenAI Service](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/supported_llm_serving_platforms/#for-cloud-hosted-model-deployments): Similar to AWS Bedrock, you can use Azure OpenAI for models like GPT-4.\n\n## Summary\n\nGitLab Duo Self-Hosted provides a powerful solution for organizations that need AI-powered development tools while maintaining strict control over their data and infrastructure. By following this implementation guide, you can deploy a robust solution that meets security and compliance requirements without compromising on the advanced capabilities that AI brings to your software development lifecycle.\n\nFor organizations with stringent security and compliance needs, GitLab Duo Self-Hosted strikes the perfect balance between innovation and control, allowing you to harness the power of AI while keeping your code and intellectual property secure within your boundaries.\n\nWould you like to learn more about implementing GitLab Duo Self-Hosted in your environment? 
Please [reach out to a GitLab representative](https://about.gitlab.com/sales/) or [visit our documentation](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/) for more detailed information.\n",[696,9],"2025-08-07",{"featured":91,"template":703,"slug":1055},"gitlab-duo-self-hosted-models-on-aws-bedrock","content:en-us:blog:gitlab-duo-self-hosted-models-on-aws-bedrock.yml","Gitlab Duo Self Hosted Models On Aws Bedrock","en-us/blog/gitlab-duo-self-hosted-models-on-aws-bedrock.yml","en-us/blog/gitlab-duo-self-hosted-models-on-aws-bedrock",{"_path":1061,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1062,"content":1067,"config":1073,"_id":1075,"_type":14,"title":1076,"_source":16,"_file":1077,"_stem":1078,"_extension":19},"/en-us/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws",{"title":1063,"description":1064,"ogTitle":1063,"ogDescription":1064,"noIndex":6,"ogImage":842,"ogUrl":1065,"ogSiteName":686,"ogType":687,"canonicalUrls":1065,"schema":1066},"GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available","The comprehensive AI-powered DevSecOps platform combined with the deepest set of cloud computing capabilities speeds dev cycles, increases automation, and improves code quality.","https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emilio Salvador\"}],\n        \"datePublished\": \"2025-04-17\",\n      }",{"title":1063,"description":1064,"authors":1068,"heroImage":842,"date":1070,"body":1071,"category":694,"tags":1072},[1069],"Emilio Salvador","2025-04-17","Today, we're excited to announce the general availability of [GitLab Duo with Amazon Q](https://about.gitlab.com/partners/technology-partners/aws/), delivering agentic AI 
throughout the software development lifecycle for AWS customers. GitLab Duo with Amazon Q, based on GitLab Ultimate, includes many familiar features such as code completion, code explanation, code generation, chat, and vulnerability explanation and resolution – all of which are now powered by Amazon Q. It is available with a Self-Managed deployment model for customers on Amazon Web Services (AWS).\n\nWith Amazon Q's agents directly embedded into GitLab's DevSecOps platform, developers maintain their familiar development environment while gaining powerful AI capabilities. The result is a frictionless experience that helps accelerate development cycles, reduce manual effort, and enhance code quality.\n\n“Participating in the early access program for GitLab Duo with Amazon Q has given us a glimpse into its transformative potential for our development workflows,” said Osmar Alonso, DevOps Engineer, Volkswagen Digital Solutions. “Even in its early stages, we saw how the deeper integration with autonomous agents could streamline our process, from code commit to production. We're excited to see how this technology empowers our team to focus on innovation and accelerate our digital transformation.\"\n\n## Agentic AI comes to complex customer environments\n\nBy combining agentic AI with secure, reliable cloud infrastructure, GitLab and AWS bring built-in security, scale, and reliability to complex customer environments, enabling them to realize the following benefits:\n\n__Unified developer experience for streamlined development__\n\nDevelopers can interact with Amazon Q through the GitLab Duo Chat interface from their preferred IDE or the GitLab web interface. 
This eliminates the need for context switching in other tools and helps developers stay focused on the project that they’re working on.\n\n__One solution for the entire software development lifecycle__\n\nCode suggestions and optimizations leverage AWS-specific patterns and practices, while testing tools understand AWS service interactions and dependencies. A common data store across all stages provides essential context to AI agents, enabling complete visibility and traceability for relevant actions.\n\n__Secure development with enterprise-grade guardrails__\n\nEnd-to-end security and compliance are built directly into the development platform with guardrails that help reduce risk without impeding velocity. This secure software development approach enforces transparency and auditability through AI agents while seamlessly integrating with AWS security services and compliance frameworks.\n\n## How to start using GitLab Duo with Amazon Q\n\nHere are five initial use cases we’re targeting to help teams build secure software faster with agentic AI: \n\n1. **Feature development acceleration** - Create issue descriptions, generate implementation plans based on your existing codebase, and produce complete merge requests ready for review. This drives feature delivery acceleration while maintaining consistency with internal development standards.  \n2. **Legacy application modernization** - Analyze your legacy Java codebase, create a comprehensive upgrade plan, and generate a merge request with all necessary code changes. This unlocks faster Java upgrade time, while providing a clear audit trail of all code transformations. Support for .NET and other languages is planned for future releases.  \n3. **Quality assurance enhancement** - Analyze code and automatically create comprehensive unit tests that understand your application logic and AWS service interactions. 
This increases test coverage, reduces manual test writing effort, and helps ensure consistent test quality across applications.  \n4. **Code review optimization** - Provide inline feedback on code changes, suggesting improvements based on development standards, highlighting security and performance considerations. This enables reduced code review cycles and delivery of higher-quality code merges for deployment.  \n5. **Vulnerability remediation** - Explain detected vulnerabilities in clear, detailed terms and one-click remediation based on recommended code changes, helping to significantly reduce the time from detection to remediation.\n\nWatch GitLab Duo with Amazon Q in action:\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1075753390?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Technical Demo: GitLab Duo with Amazon Q\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n> #### Get the benefits of GitLab Duo with Amazon Q today\n> GitLab's unified, AI-powered DevSecOps platform with Amazon Q's advanced AI capabilities provides AWS customers with a solution that transforms how teams build and deploy software. 
To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).",[696,496,9,698,699,742],{"slug":1074,"featured":91,"template":703},"gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws","content:en-us:blog:gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws.yml","Gitlab Duo With Amazon Q Agentic Ai Optimized For Aws","en-us/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws.yml","en-us/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws",{"_path":1080,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1081,"content":1086,"config":1091,"_id":1093,"_type":14,"title":1094,"_source":16,"_file":1095,"_stem":1096,"_extension":19},"/en-us/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai",{"title":1082,"description":1083,"ogTitle":1082,"ogDescription":1083,"noIndex":6,"ogImage":842,"ogUrl":1084,"ogSiteName":686,"ogType":687,"canonicalUrls":1084,"schema":1085},"GitLab Duo with Amazon Q: DevSecOps meets agentic AI","AI-powered DevSecOps enhanced with autonomous AI agents accelerates developer productivity, application modernization, and innovation.","https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo with Amazon Q: DevSecOps meets agentic AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emilio Salvador\"}],\n        \"datePublished\": \"2024-12-03\",\n      }",{"title":1082,"description":1083,"authors":1087,"heroImage":842,"date":1088,"body":1089,"category":694,"tags":1090},[1069],"2024-12-03","We're excited to announce GitLab Duo with Amazon Q, a joint offering that brings together GitLab's comprehensive AI-powered DevSecOps platform with Amazon Q's autonomous AI agents 
in a single, integrated solution.\n\nGitLab Duo with Amazon Q transforms software development by integrating powerful AI agents directly into your daily workflows. Instead of switching between multiple tools, developers can now accelerate key tasks — from feature development to code reviews — all from within GitLab's comprehensive DevSecOps platform. Amazon Q’s AI agents act as intelligent assistants, automating time-consuming tasks like generating code from requirements, creating unit tests, conducting code reviews, and modernizing Java applications. By handling these complex tasks, this joint offering helps teams focus on innovation, while maintaining security and quality standards.\n\nThis enterprise-class developer experience includes:\n* The GitLab unified platform with one single data store, which automates the building, testing, packaging, and deployment of secure code\n* GitLab Duo, enhanced with Amazon Q developer, which leverages GitLab project context to generate multi-file changes based on the task\n* Amazon Q AI agents integrated with GitLab Duo, updating issues and creating merge requests per task, with permission scoped to the project\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1033653810?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab Duo and Amazon Q\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Partnership innovation: GitLab and AWS\n\nGitLab Duo with Amazon Q is the result of close collaboration between GitLab and AWS engineering teams, combining our strengths to transform software development. 
This partnership unites GitLab's expertise in unified DevSecOps with AWS's leadership in cloud computing, creating an innovative solution that understands how developers work.\n\nBy integrating Amazon Q's autonomous agents with GitLab's comprehensive AI-powered platform, we've built more than a technical integration. We've created an experience that makes AI-powered development feel natural and upholds the security, compliance, and reliability that enterprises require.\n\nIndustry analysts recognize the significance of this integration in advancing AI-powered software development:\n\n***\"With this joint offering, GitLab and AWS are combining their strengths to make agentic AI a reality in software development,\" said Katie Norton, Research Manager at IDC. \"GitLab Duo with Amazon Q addresses strong use cases and critical challenges that empower customers to harness the full potential of AI.\"***\n\n***\"Both developers and the organizations they work for are increasingly interested in simplified and unified experiences,\" says Rachel Stephens, senior analyst at RedMonk. \"Especially in the era of AI – when security and privacy are paramount concerns – organizations want to both harness the power of cutting edge technology while also controlling risk and minimizing disjointed software tool chains. The partnership between GitLab Duo and Amazon Q seeks to give developers the tools they need within the context of an end-to-end DevSecOps experience.\"***\n\n## 4 key customer benefits \n\nGitLab Duo with Amazon Q pairs AI-powered DevSecOps with the deepest set of cloud computing capabilities. Together, they help development teams:\n\n### 1. Streamline feature development from idea to code \n\nDevelopment teams often spend hours translating requirements into code, leading to slower delivery and inconsistent implementation. 
You can now invoke the GitLab Duo with Amazon Q agent by utilizing a new quick action `/q dev`, which will convert an issue description directly into merge-ready code in minutes. The agent analyzes requirements, plans the implementation, and generates a complete merge request — all while adhering to your team's development standards. Teams can iterate rapidly using feedback in comments, significantly reducing the time from idea to production.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050110?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Feature Dev with Rev\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 2. Modernize legacy code without the headache \n\nUpgrading Java applications traditionally requires weeks of careful planning, manual code changes, and extensive testing. By using quick action `/q transform`, you can change this by automating the entire Java modernization process. In minutes, not hours, the agent analyzes your Java 8 or 11 codebase, creates a comprehensive upgrade plan, and generates fully documented merge requests for Java 17 migration. Every change is tracked and traceable, giving teams confidence while improving application security and performance.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050145?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"QCT\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 3. 
Accelerate code reviews without sacrificing quality \n\nCode reviews often create bottlenecks: Teams wait days for feedback yet must maintain consistent standards. With the `/q review` quick action, you get instant, intelligent feedback on code quality and security directly in merge requests. By automatically identifying potential issues and suggesting improvements based on your standards, teams can maintain high-quality code while dramatically reducing review cycles.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050136?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Code Reviews\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 4. Automate testing to ship with confidence\n\nManual test creation is time-consuming and often leads to inconsistent coverage across teams. With the `/q test` quick action, you can automatically generate comprehensive unit tests that understand your application logic. The agent ensures thorough coverage of critical paths and edge cases, matching your existing testing patterns. 
This automation helps teams catch issues earlier and maintain consistent quality standards, saving valuable developer time.\n\n\u003Cdiv style=\"padding:54.37% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050181?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Use GitLab Duo with Amazon Q to add tests\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Enterprise-grade security and guardrails included\n\nBuilt for enterprise scale and security, this offering combines GitLab's integrated security, compliance, and privacy with Amazon Q's AI agent, accelerating developer workflows to help organizations ship secure software faster.\n\nThe integration features:\n\n* Built-in guardrails that maintain development velocity  \n* Granular controls for AI-powered features at user, project, and group levels  \n* End-to-end security integration with existing workflows\n\nDevSecOps teams can securely scale the development environment with the world's most broadly adopted cloud.\n\n## What's next\n\nGitLab Duo with Amazon Q builds on our existing integration with [AWS announced in May 2024](https://press.aboutamazon.com/2024/4/aws-announces-general-availability-of-amazon-q-the-most-capable-generative-ai-powered-assistant-for-accelerating-software-development-and-leveraging-companies-internal-data), representing a significant step forward in our joint mission to transform software development. This deeper integration of AI capabilities marks the beginning of our expanded collaboration with AWS. 
As we continue to evolve these capabilities, we'll focus on:\n\n* Extending AI features across the development lifecycle  \n* Enhancing developer productivity  \n* Meeting enterprise development demands at scale\n\n**GitLab Duo with Amazon Q is available today on a [public branch](https://gitlab.com/groups/gitlab-org/-/epics/16059) in the GitLab.org project. To get access to a preview and learn more about how it can transform your software development process, visit [our website](https://about.gitlab.com/partners/technology-partners/aws/#interest).**",[742,9,696,496,233],{"slug":1092,"featured":91,"template":703},"gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai","content:en-us:blog:gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai.yml","Gitlab Duo With Amazon Q Devsecops Meets Agentic Ai","en-us/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai.yml","en-us/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai",{"_path":1098,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1099,"content":1105,"config":1110,"_id":1112,"_type":14,"title":1113,"_source":16,"_file":1114,"_stem":1115,"_extension":19},"/en-us/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider",{"title":1100,"description":1101,"ogTitle":1100,"ogDescription":1101,"noIndex":6,"ogImage":1102,"ogUrl":1103,"ogSiteName":686,"ogType":687,"canonicalUrls":1103,"schema":1104},"GitLab is now available as an AWS CodeStar Connections provider","AWS released native CodePipeline integration for GitLab projects and repos, helping to ensure a best-in-class experience when using GitLab and AWS together.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098884/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_397632156_3Ldy1urjMStQCl4qnOBvE0_1750098884409.jpg","https://about.gitlab.com/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        
\"headline\": \"GitLab is now available as an AWS CodeStar Connections provider\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2024-01-11\",\n      }",{"title":1100,"description":1101,"authors":1106,"heroImage":1102,"date":1107,"body":1108,"category":786,"tags":1109},[718],"2024-01-11","The GitLab DevSecOps Platform now integrates natively with many AWS services through AWS CodeStar Connections and AWS CodePipeline. This long-awaited integration was recently completed by the AWS CodeSuite service team for GitLab.com SaaS, GitLab Self-Managed, and GitLab Dedicated. AWS CodeStar Connections is a utility layer, which means other AWS services can enable native GitLab integration with less work.\n\nOnce created, CodeStar Connections objects can be used directly to integrate with many AWS services such as:\n- AWS CodePipeline,\n- Amazon CodeWhisperer Customization Capability,\n- AWS Service Catalog\n- AWS Glue\n\nWhen a CodeStar Connection is used to configure a GitLab CodePipeline configuration it can further support:\n- AWS CodeBuild\n- Amazon SageMaker MLOps Projects\n- AWS CodeDeploy\n\nGitLab and AWS have been working at ever deeper levels of technical and business integration to ensure that our co-customers have a best-in-class experience when using GitLab and AWS together.\n\n![AWS CodeStar integration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098901/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098900704.png)\n\nCheck out the complete list of AWS Services that are now directly accessible in the [GitLab AWS Integration Index documentation](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html).\n\n![CodeStar - New Technology and Solutions for using GitLab and AWS Together 
](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098901/Blog/Content%20Images/Blog/Content%20Images/AWS_re_Invent_2023__New_Technology_and_Solutions_for_using_GitLab_and_AWS_Together__4__aHR0cHM6_1750098900705.png)\n\n## Resources\n\n- GitLab [AWS Integration Index documentation](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html) is a one-stop location for these new integrations as well as existing integrations\n- AWS documentation for [setting up CodeStar Connections with GitLab.com SaaS](https://docs.aws.amazon.com/codepipeline/latest/userguide/connections-gitlab.html)\n- AWS documentation for [setting up CodeStar Connections with self-managed GitLab](https://docs.aws.amazon.com/codepipeline/latest/userguide/connections-gitlab-managed.html)\n- AWS documentation for [configuring AWS CodePipeline integration](https://docs.gitlab.com/ee/user/project/integrations/aws_codepipeline.html)\n- [AWS announcement for GitLab CodePipeline Integration for GitLab SaaS](https://aws.amazon.com/about-aws/whats-new/2023/08/aws-codepipeline-supports-gitlab/) and [AWS announcement for GitLab Self-Managed](https://aws.amazon.com/about-aws/whats-new/2023/12/codepipeline-gitlab-self-managed/)\n\n![codestar-amazonpartnerlogo](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098901/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098900705.png)\n",[9,109,284,233],{"slug":1111,"featured":6,"template":703},"gitlab-is-now-available-as-an-aws-codestar-connections-provider","content:en-us:blog:gitlab-is-now-available-as-an-aws-codestar-connections-provider.yml","Gitlab Is Now Available As An Aws Codestar Connections 
Provider","en-us/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider.yml","en-us/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider",{"_path":1117,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1118,"content":1124,"config":1130,"_id":1132,"_type":14,"title":1133,"_source":16,"_file":1134,"_stem":1135,"_extension":19},"/en-us/blog/little-things-make-a-difference",{"title":1119,"description":1120,"ogTitle":1119,"ogDescription":1120,"noIndex":6,"ogImage":1121,"ogUrl":1122,"ogSiteName":686,"ogType":687,"canonicalUrls":1122,"schema":1123},"Little things make a difference","Let's celebrate the small UI refinements that add up to create a big impact","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669673/Blog/Hero%20Images/engineering.png","https://about.gitlab.com/blog/little-things-make-a-difference","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Little things make a difference\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christie Lenneville\"}],\n        \"datePublished\": \"2021-02-12\",\n      }",{"title":1119,"description":1120,"authors":1125,"heroImage":1121,"date":1127,"body":1128,"category":972,"tags":1129},[1126],"Christie Lenneville","2021-02-12","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nWhen you're busy focusing on the big picture of feature improvement work, it can be easy to forget the value of tiny refinements. But when you add them all up, fixing little \"paper cuts\" can have a meaningful impact on user experience. \n\nThat's why I was so excited to see the [GitLab UI Polish Gallery](https://nicolasdular.gitlab.io/gitlab-polish-gallery/) created by [Nicolas Dular](https://gitlab.com/nicolasdular), a Senior Fullstack Engineer on our Growth team. It highlights small refinement contributions &#151; like adjusting alignment, spacing, and type scale &#151; that are easy to overlook. 
But seeing them in aggregate, you quickly realize what a difference they make.\n\nFor me, the most inspiring part of the gallery was seeing such a diverse group of people contribute to making our product the best it can be. Developers, designers, and members of the wider GitLab community (special shout out to [Yogi](https://gitlab.com/yo)) all care enough about our product experience to put time into small changes.\n\nHere are a few examples, but I encourage you to check out the gallery for yourself!\n\n## Polishing the Jira Connect app\n\nOur [Jira Connect](https://marketplace.atlassian.com/apps/1221011/gitlab-com-for-jira-cloud?hosting=cloud&tab=overview) app helps customers use GitLab in coordination with Jira for a more seamless developer experience. [Libor Vanc](https://gitlab.com/lvanc) (Senior Product Designer) and [Justin Ho](https://gitlab.com/justin_ho) (Senior Frontend Engineer) on our Ecosystem team made some light changes to the app's type scale and CTAs that make the app much simpler to visually parse. What a nice change!\n\n![GitLab Jira Connect app](https://about.gitlab.com/images/blogimages/little-things-make-a-difference/jira-connect-gitlab.png)\n\n## Addressing alignment problems in the merge request widget\n\nMerge requests are central to our user experience, and we're working hard to make the experience exceptional. When Staff Product Designer, [Pedro Moreira da Silva](https://gitlab.com/pedroms), noticed alignment problems in the MR widget, he worked with Senior Frontend Engineer, [Jacques Erasmus](https://gitlab.com/jerasmus), to address them. It was a very subtle change that will impact millions of users.\n\n![Reply box in diffs](https://about.gitlab.com/images/blogimages/little-things-make-a-difference/widget-alignment.png)\n\n## Fixing the vertical alignment in card headers\n\nThis change is so subtle that it's hard to even notice, but the vertical alignment in the card header of our on-demand security scans was off by mere pixels. 
Product Designer, [Annabel Dunstone Gray](https://gitlab.com/annabeldunstone), noticed the [problem](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/50550#note_480509692) during an MR review, and Frontend Engineer, [Paul Gascou Vaillancourt](https://gitlab.com/pgascouvaillancourt), jumped in to fix it in the same release.\n\n![Card header vertical alignment](https://about.gitlab.com/images/blogimages/little-things-make-a-difference/card-header.png)\n\n## More to come!\n\nWe make visual refinements all of the time, so this is just a start to what you'll see in the [GitLab UI Polish Gallery](https://nicolasdular.gitlab.io/gitlab-polish-gallery/). I'll personally be checking in from time to time to remind myself of the little things that make a big difference.\n\n",[745,9],{"slug":1131,"featured":6,"template":703},"little-things-make-a-difference","content:en-us:blog:little-things-make-a-difference.yml","Little Things Make A Difference","en-us/blog/little-things-make-a-difference.yml","en-us/blog/little-things-make-a-difference",{"_path":1137,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1138,"content":1144,"config":1151,"_id":1153,"_type":14,"title":1154,"_source":16,"_file":1155,"_stem":1156,"_extension":19},"/en-us/blog/lockheed-martin-aws-gitlab",{"title":1139,"description":1140,"ogTitle":1139,"ogDescription":1140,"noIndex":6,"ogImage":1141,"ogUrl":1142,"ogSiteName":686,"ogType":687,"canonicalUrls":1142,"schema":1143},"GitLab, AWS help strengthen Lockheed Martin’s digital transformation","Lockheed Martin’s software factory selected GitLab’s DevSecOps Platform, along with AWS, to streamline toolchains, increase collaboration, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668830/Blog/Hero%20Images/lockheed-martin-cover-2.jpg","https://about.gitlab.com/blog/lockheed-martin-aws-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab, 
AWS help strengthen Lockheed Martin’s digital transformation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2023-05-16\",\n      }",{"title":1139,"description":1140,"authors":1145,"heroImage":1141,"date":1147,"body":1148,"category":786,"tags":1149},[1146],"GitLab","2023-05-16","\nLockheed Martin launched its 1LMX initiative to transform its end-to-end business processes and systems. One focus of the transformation was to pare down the company’s wide variety of DevOps tools – each program or product line at Lockheed Martin had its own toolchain. To mitigate this issue, drive rapid production, and increase collaboration, Lockheed Martin adopted GitLab’s DevSecOps Platform, run on AWS.\n\n“GitLab has strengthened our 1LMX transformation, upgrading the way we collaborate and innovate to develop software. Now, all of our programs have access to a high-quality software development environment,” said Alan Hohn, Lockheed Martin’s Director of Software Strategy.\n\nGitLab’s DevSecOps Platform enables Lockheed Martin to ship software more efficiently and securely for thousands of their programs, ranging from satellite platforms and aerospace systems to ground control software and maritime surface and subsurface software.\n\nHere are some top-level benefits that Lockheed Martin has seen with GitLab’s DevSecOps Platform:\n* Using GitLab’s single platform, Lockheed Martin’s legacy projects are delivered to testing every six days, down from a monthly cadence using distributed toolchains.   \n* Developers experienced a 90% reduction in time spent on system maintenance.\n* The organization has seen 200% annual growth in adoption of The DevSecOps Platform.\n* AWS enabled automated Infrastructure as Code for a scalable and resilient cloud architecture.\n\n## Efficiency gains\n\nIn migrating to GitLab, Lockheed Martin has realized a number of benefits and eliminated obstacles. 
In three and a half years, Lockheed Martin has created 64,000 projects on GitLab, and created 110,000 continuous integration builds daily. \n\nAdditionally, they were able to retire thousands of separately maintained servers thereby reducing time spent on maintenance by 90%. GitLab further enables internal efficiency within the organization by allowing teams to securely share reusable code components in globally accessible environments. Since implementing GitLab, Lockheed Martin teams have added 18 new repositories a day for the past two years. \n\n## How GitLab, AWS, and Lockheed Martin work together\n\nIn 2022, after rapid adoption of GitLab created the need for a more scalable solution, Lockheed Martin, GitLab, and AWS worked together to automate and optimize Lockheed Martin's code deployment across the enterprise. \n\nThe solution started with a well-architected review of the design between Lockheed Martin, AWS, and GitLab. AWS then helped to automate and optimize the Lockheed Martin GitLab deployment for continuous integration and continuous delivery (CI/CD) environment by delivering Infrastructure as Code to deploy the environment in two hours vs. several hours previously. \n\nThe AWS team also established workflows to deliver a fully automated, highly available, disaster recovery-compliant, scalable architecture for GitLab enabling a consistent process that runs without manual intervention.\n\nAWS supported load balancing to auto-scale the deployment process based on developer demand for pipeline runs and user traffic so that developers are not waiting on their deployments to execute. Pre-migration testing was performed to establish baselines, followed by post-migration testing to measure performance and scalability gains in delivering faster deployments. \n\nAdditionally, monitoring and security controls were implemented to comply with Lockheed Martin policies. 
As a result, the team was able to deliver operational efficiencies with the number of build requests waiting to be processed decreasing from 200 to zero, and reduced time for code deployment across the enterprise.\n\nThis effort showcased how large enterprises with thousands of software developers can build and deploy automated, scalable, and resilient code pipelines in the cloud using platforms such as GitLab by leveraging AWS best practices.\n\nGitLab’s Chief Product Officer David DeSanto added, “For more than a century, Lockheed Martin has set the standard for innovation within the public sector, and demonstrates what is possible when organizations invest in digital transformation efforts.”\n\nLockheed Martin has 20,000 GitLab users, and is looking to double that number and migrate even more of their projects over to The DevSecOps Platform in the coming years. To dig deeper into how Lockheed Martin uses GitLab, read [our case study](/customers/lockheed-martin), and to learn more about GitLab for the Public Sector, visit [our site](/solutions/public-sector/).\n",[1150,9,928],"customers",{"slug":1152,"featured":6,"template":703},"lockheed-martin-aws-gitlab","content:en-us:blog:lockheed-martin-aws-gitlab.yml","Lockheed Martin Aws Gitlab","en-us/blog/lockheed-martin-aws-gitlab.yml","en-us/blog/lockheed-martin-aws-gitlab",{"_path":1158,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1159,"content":1165,"config":1171,"_id":1173,"_type":14,"title":1174,"_source":16,"_file":1175,"_stem":1176,"_extension":19},"/en-us/blog/microcks-and-gitlab-part-one",{"title":1160,"description":1161,"ogTitle":1160,"ogDescription":1161,"noIndex":6,"ogImage":1162,"ogUrl":1163,"ogSiteName":686,"ogType":687,"canonicalUrls":1163,"schema":1164},"Speed up API and microservices delivery with Microcks and GitLab - Part 1","Learn how to configure Microcks for GitLab and what the use cases are for this open source Kubernetes-native 
tool.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683021/Blog/Hero%20Images/lightsticks.png","https://about.gitlab.com/blog/microcks-and-gitlab-part-one","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Speed up API and microservices delivery with Microcks and GitLab - Part 1\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Madou Coulibaly\"}],\n        \"datePublished\": \"2023-09-27\",\n      }",{"title":1160,"description":1161,"authors":1166,"heroImage":1162,"date":1168,"body":1169,"category":721,"tags":1170},[1167],"Madou Coulibaly","2023-09-27","\n\nAPI development is all the rage these days for customer and partner integration, frontend-to-backend communication, microservices orchestration, and more. Yet APIs have their challenges, including how to create a fast feedback loop on design, how different teams can work with autonomy without having to wait for each other's API implementation, and how to cope with backward compatibility tests when shipping newer versions of the API. \n\n[Microcks](https://microcks.io), an open source, Kubernetes-native tool for API mocking and testing, addresses these challenges. With Microcks, which is accepted as a Sandbox project in the [Cloud Native Computing Foundation](https://cncf.io), developers can leverage their [OpenAPI](https://www.openapis.org/), [GraphQL](https://graphql.org/), [gRPC](https://grpc.io/), [AsyncAPI](https://www.asyncapi.com/), and [Postman Collection](https://www.postman.com/collection/) assets to quickly mock and simulate APIs before writing them. Couple Microcks with GitLab and you have a powerful combination to foster collaboration, encourage rapid changes, and provide a robust delivery platform for API-based applications.\n\nIn this ongoing blog series, we will introduce you to Microcks use cases and how they fit with the GitLab platform. 
We'll also discuss technical integration points that will help ease the developer burden, including identity management, Git repositories, and pipeline integrations.\n\n## What is Microcks?\nMicrocks addresses two major use cases: \n- **Simulating (or mocking) an API or a microservice** from a set of descriptive assets. This can be done as soon as you start the design phase to set up a feedback loop very quickly, or later on to ease the pain of provisioning environments with a lot of dependencies.\n- **Validating the conformance of your application regarding your API specification** by running contract-test. This validation can be integrated into your CI/CD pipeline so that conformance can be checked on each and every iteration. This is of great help to enforce backward compatibility of your API of microservices interfaces.\n\nMicrocks offers a uniform and consistent approach for the various kinds of request/response APIs (REST, GraphQL, gRPC, Soap) and event-driven APIs (currently supporting eight different protocols), thereby bringing consistency for users and for automations all along your API lifecycle.\n\n## How Microcks fits into the software development lifecycle\nMicrocks is a solution based on containers and can be deployed in several configurations. It can be deployed on the developer laptop through [Docker](https://microcks.io/documentation/installing/docker-compose/), [Podman](https://microcks.io/documentation/installing/podman-compose/) or [Docker Desktop Extension](https://microcks.io/documentation/installing/docker-desktop-extension/) to assist with mocking complex environments. 
When it comes to team collaboration, Microcks can be deployed as a centralized instance that connects to the Git repositories of the organization, discovers the API artifacts, and then provides shared up-to-date API simulations.\n\n![diagram of how Microcks fits into development lifecycle](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/microcks.png){: .shadow.small.center}\n\nTo ease the burden on developers (and administrators), Microcks can be configured to use your GitLab platform as an identity provider. With that configuration, integrating Microcks is seamless, and API simulations are automatically shared among development teams. Microcks fosters collaboration by providing everyone with the same “source of truth” and avoiding drift risks. The tool can also be used to lower the pain and the cost of deploying and maintaining complex QA environments because simulations are inexpensive to deploy or redeploy on-demand. Microcks deployment follows a GitOps approach.\n\nBeyond this sharing of simulations, Microcks also integrates well with CI/CD pipelines. As you release API-based applications, there is always concern about conformance of the contractualized expectations you defined using specifications like OpenAPI, GraphQL, and the like. Usually, the hardest part isn't delivering the `1.0` of this API; problems come later when you're trying to deliver the `1.3`. This latest version must still be backward compatible with the 1.0 contract if you don't want to make your consumers angry and frustrated.\n\nThis conformance validation is very well assured by Microcks using contract-testing principles. 
So we encourage you to plug Microcks into some `test` related jobs in your GitLab pipeline and delegate this conformance validation to your Microcks instance.\n\n![microcks-in-gitlab-workflow](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/microcks-in-gitlab-workflow.png){: .shadow.medium.center}\n\n\nEmbedding Microcks conformance testing in your pipeline is actually easy thanks to our lightweight CLI that you'll integrate in pipeline jobs. You can choose to reuse an existing Microcks instance to record results and keep history of your success or pop up a new ephemeral instance as it's lightweight and fast to bootstrap.\n\n## How to set up GitLab as an identity provider in Microcks\n\nTo start off this series, we will detail how to configure Microcks to use your GitLab platform as an identity provider. This is in fact very easy as authentication in Microcks is based on [Keycloak](https://keycloak.org) (another CNCF project) and GitLab can be set as an identity provider in Keycloak (see [official documentation](https://www.keycloak.org/docs/latest/server_admin/index.html#gitlab)).\n\n**Note:** This configuration is optional as Microcks can use any other identity provider Keycloak integrates with.\n\nKeycloak is a very common solution that may be deployed already at your organization. If not, Microcks comes with a Keycloak distribution that is pre-configured for its usage with a realm called `microcks`. 
We have used this realm to validate this configuration.\n\n### Create a GitLab Group Application\nThe first thing is to create a new [Group Application](https://docs.gitlab.com/ee/integration/oauth_provider.html#create-a-group-owned-application) on your GitLab instance as follows:\n- `Name`: `microcks-via-keycloak`\n- `Redirect URI`: `https://keycloak.acme.org/realms/microcks/broker/gitlab/endpoint`\n- `Scopes`: `read_user`, `openid`, `profile` and `email`\n\n![gitlab-application-form](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/gitlab-application-form.png){: .shadow.medium.center}\n\n\nThis application uses your Keycloak instance with `https://keycloak.acme.org/realms/microcks/broker/gitlab/endpoint` as the redirect URI. As a result, we obtain an `Application ID` and an associated `Secret` we have to keep aside for the next step.\n\n![gitlab-application](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/gitlab-application.jpeg){: .shadow.medium.center}\n\n\n### Add GitLab as identity provider in Keycloak\nThe next step takes place in the Keycloak admin console. Once the correct `microcks` realm is selected, you'll just have to go to the **Identity providers** section and add a GitLab provider. Simply paste here the `Application ID` you got earlier as `Client ID` and the `Secret` as `Client Secret`. 
You can also choose a `Display order` if you plan to have multiple identity providers.\n\n![keycloak-identity-provider](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/keycloak-identity-provider.jpg){: .shadow.medium.center}\n\n\nThen, from the **Authentication** section in the admin console, choose the browser flow and configure the `Identity Provider Redirector` as follows:\n\n- `Alias`: `GitLab`\n- `Default Identify Provider`: `gitlab`\n\n![keycloak-redirector](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/keycloak-redirector.jpg){: .shadow.medium.center}\n\n### Test your Microcks configuration\nNow open the Microcks URL into your browser and you'll be directly redirected to the GitLab login page. Enter your GitLab credentials and you will be authenticated and redirected to Microcks. \n\n![microcks-homepage](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/microcks-homepage.jpeg){: .shadow.medium.center}\n\n## What's next?\nIn upcoming blogs, we'll detail how GitLab can be used in the two major use cases for Microcks. We'll see how Microcks integrates with GitLab Git repositories to discover API specifications and produce simulations, and how to integrate Microcks conformance tests into your GitLab CI/CD pipelines.\n\n_[Laurent Broudoux](https://www.linkedin.com/in/laurentbroudoux/) is a cloud-native architecture expert and enterprise integration problem lover. He has helped organizations in adopting distributed and cloud paradigms while capitalizing on their critical existing assets. He is the founder and lead developer of the [Microcks.io](https://microcks.io/) open-source project: a Kubernetes-native tool for API mocking and testing. 
For this, he is using his 10+ years experience as an architect in financial services where he defined API transformation strategies, including governance and delivery process._\n\n_[Madou Coulibaly](https://gitlab.com/madou) is a senior solutions architect at GitLab._\n",[9,887,109,1014,233],{"slug":1172,"featured":6,"template":703},"microcks-and-gitlab-part-one","content:en-us:blog:microcks-and-gitlab-part-one.yml","Microcks And Gitlab Part One","en-us/blog/microcks-and-gitlab-part-one.yml","en-us/blog/microcks-and-gitlab-part-one",{"_path":1178,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1179,"content":1185,"config":1191,"_id":1193,"_type":14,"title":1194,"_source":16,"_file":1195,"_stem":1196,"_extension":19},"/en-us/blog/secure-composition-analysis-bug-not-updating-database",{"title":1180,"description":1181,"ogTitle":1180,"ogDescription":1181,"noIndex":6,"ogImage":1182,"ogUrl":1183,"ogSiteName":686,"ogType":687,"canonicalUrls":1183,"schema":1184},"Bug found and resolved in Dependency Scanning","Some customers will need to take specific action to manually update their Dependency Scanning image to receive a bug fix.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/secure-composition-analysis-bug-not-updating-database","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Bug found and resolved in Dependency Scanning\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nicole Schwartz\"}],\n        \"datePublished\": \"2021-02-19\",\n      }",{"title":1180,"description":1181,"authors":1186,"heroImage":1182,"date":1188,"body":1189,"category":972,"tags":1190},[1187],"Nicole Schwartz","2021-02-19","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nDependency Scanning relies on the GitLab [Vulnerability 
Database](https://about.gitlab.com/direction/secure/vulnerability-research/advisory-database/) (called [gemnasium-db](https://gitlab.com/gitlab-org/security-products/gemnasium-db)) to provide it with the latest advisory data (i.e. CVEs). Dependency Scanning docker images are built and released with the latest version of the database and in addition, the analyzers update this database to the latest version at the time of a scan. \n\nHowever, starting with version 2.8.1 of the Dependency Scanning analyzer called gemnasium, the vulnerability database was [not updating itself at scan time](https://gitlab.com/gitlab-org/gitlab/-/issues/294296). Versions between v2.8.1 (released 2020-03-30) and v2.28.0 (released 2021-02-03) are affected by this bug. As a result, since the introduction of the bug, scan results would only be able to identify advisories published on or before the analyzer image release date. In some cases this meant that the advisories' Dependency Scanning analyzers were outdated by several weeks (relying only on the database checked out at image build time).\n\nWe are concerned that this bug made it out to customers and are performing a [root cause analysis](https://gitlab.com/gitlab-org/gitlab/-/issues/321315).\n\nMost customers will receive the bug fix automatically and will have the latest advisory database the next time their Dependency Scanning jobs run. But customers with their own copy of the GitLab container registry or dedicated runners with a docker pull-policy other than always, must take the manual action to pull or update your pin to the latest image (or at least one that is not impacted by this bug). 
Users that must take this manual action are:\n\n- Customers with an edited Dependency Scanning template that pins their analyzers to a non-major-only tag (for example gemnasium:2.27.0 rather than gemnasium:2)\n- Customers running in an [Offline Environment](https://docs.gitlab.com/ee/user/application_security/offline_deployments/) with their own container registry mirroring GitLab's\n- Self-managed customers or customers with their own docker runners using a pull policy other than `always`\n\nThe three analyzer types that are affected are the gemnasium analyzer, the gemnasium-python and gemnasium-maven analyzer. The affected versions of each are:\n\n- gemnasium v2.8.1 to v2.28.0: update to v2.28.1 or above\n- gemnasium-python v2.11.0 to v2.17.2: update to v2.17.3 or above\n- gemnasium-maven v2.13.0 to v2.20.3: update to v2.20.4 or above\n\nTL;DR - If you are using Dependency Scanning analyzers and are not always pulling their docker images from GitLab's docker container registry, please update your analyzers' docker images promptly in order to sync the analyzers with the latest available advisories.\n\n{: .note}\n",[699,929,9],{"slug":1192,"featured":6,"template":703},"secure-composition-analysis-bug-not-updating-database","content:en-us:blog:secure-composition-analysis-bug-not-updating-database.yml","Secure Composition Analysis Bug Not Updating Database","en-us/blog/secure-composition-analysis-bug-not-updating-database.yml","en-us/blog/secure-composition-analysis-bug-not-updating-database",{"_path":1198,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1199,"content":1204,"config":1209,"_id":1211,"_type":14,"title":1212,"_source":16,"_file":1213,"_stem":1214,"_extension":19},"/en-us/blog/speed-up-code-reviews-let-ai-handle-the-feedback-implementation",{"title":1200,"description":1201,"ogTitle":1200,"ogDescription":1201,"noIndex":6,"ogImage":842,"ogUrl":1202,"ogSiteName":686,"ogType":687,"canonicalUrls":1202,"schema":1203},"Speed up code reviews: Let AI handle 
the feedback implementation","Discover how GitLab Duo with Amazon Q automates the implementation of code review feedback through AI, transforming a time-consuming manual process into a streamlined workflow.","https://about.gitlab.com/blog/speed-up-code-reviews-let-ai-handle-the-feedback-implementation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Speed up code reviews: Let AI handle the feedback implementation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-06-10\",\n      }",{"title":1200,"description":1201,"authors":1205,"heroImage":842,"date":1206,"body":1207,"category":694,"tags":1208},[691],"2025-06-10","You know that feeling when you've just submitted a merge request and the code review comments start rolling in? One reviewer wants the labels updated, another asks for side-by-side layouts, someone else requests bold formatting, and don't forget about that button color change. Before you know it, you're spending hours implementing feedback that, while important, takes you away from building new features. It's a time-consuming process that every developer faces, yet it feels like there should be a better way.\n\nWhat if you could have an AI assistant that understands code review feedback and automatically implements the changes for you? That's exactly what [GitLab Duo with Amazon Q](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/) brings to your development workflow. This seamless integration combines GitLab's comprehensive DevSecOps platform with Amazon Q's advanced AI capabilities, creating an intelligent assistant that can read reviewer comments and converts them directly into code changes. 
Instead of manually addressing each piece of feedback, you can let AI handle the implementation while you focus on the bigger picture.\n\n## How GitLab Duo with Amazon Q works\n\nWhen you're viewing a merge request with reviewer comments, you'll see feedback scattered throughout your code. Let's take the examples from earlier in this article: maybe you've received a request to update a form label here, a suggestion to display fields side-by-side there, or a note about making certain text bold. Each comment represents a task that normally you'd need to handle manually.\n\n![feedback on an MR](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673634/Blog/Content%20Images/1-show-comment.png)\n\nWith GitLab Duo with Amazon Q, you can simply enter the `/q dev` quick action in a comment. This prompts Amazon Q to analyze all the feedback and start modifying your code automatically. The AI agent understands the context of each comment and implements the requested changes directly in your codebase.\n\n![/q dev function prompting Amazon Q to analyze feedback](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673634/Blog/Content%20Images/2-invoke-q-dev.png)\n\nOnce Amazon Q processes the feedback, you can view all the updates in the \"Changes\" tab of your merge request. Every modification is clearly visible, so you can verify that the AI agent correctly interpreted and implemented each piece of feedback. 
You can then run your updated application to confirm that all the changes work as expected — that form label is updated, the fields are displayed side-by-side, the text is bold, and yes, that button is now blue.\n\nWatch the code review feedback process in action:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/31E9X9BrK5s?si=ThFywR34V3Bfj1Z-\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nProcessing code review feedback is a necessary but time-intensive part of software development.  GitLab Duo with Amazon Q evolves this manual process into an automated workflow, dramatically reducing the time between receiving feedback and implementing changes. By letting AI handle these routine modifications, you're free to focus on what really matters — building innovative features and solving complex problems.\n\nWith GitLab Duo with Amazon Q, you can:\n- Eliminate hours of manual feedback implementation\n- Accelerate your code review cycles\n- Maintain consistency in how feedback is addressed\n- Reduce context switching between reviewing comments and writing code\n- Ship features faster with streamlined deployment times\n\n> #### To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).\n\n## GitLab Duo with Amazon Q resources\n\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/)\n- [GitLab and AWS partner page](https://about.gitlab.com/partners/technology-partners/aws/)\n- [GitLab Duo with Amazon Q documentation](https://docs.gitlab.com/user/duo_amazon_q/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [Agentic 
AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)",[698,699,496,9,697,700],{"slug":1210,"featured":91,"template":703},"speed-up-code-reviews-let-ai-handle-the-feedback-implementation","content:en-us:blog:speed-up-code-reviews-let-ai-handle-the-feedback-implementation.yml","Speed Up Code Reviews Let Ai Handle The Feedback Implementation","en-us/blog/speed-up-code-reviews-let-ai-handle-the-feedback-implementation.yml","en-us/blog/speed-up-code-reviews-let-ai-handle-the-feedback-implementation",{"_path":1216,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1217,"content":1223,"config":1232,"_id":1234,"_type":14,"title":1235,"_source":16,"_file":1236,"_stem":1237,"_extension":19},"/en-us/blog/tracking-down-missing-tcp-keepalives",{"title":1218,"description":1219,"ogTitle":1218,"ogDescription":1219,"noIndex":6,"ogImage":1220,"ogUrl":1221,"ogSiteName":686,"ogType":687,"canonicalUrls":1221,"schema":1222},"Tracking TCP Keepalives: Lessons in Docker, Golang & GitLab","An in-depth recap of debugging a bug in the Docker client library.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680874/Blog/Hero%20Images/network.jpg","https://about.gitlab.com/blog/tracking-down-missing-tcp-keepalives","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What tracking down missing TCP Keepalives taught me about Docker, Golang, and GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stan Hu\"}],\n        \"datePublished\": \"2019-11-15\",\n      }",{"title":1224,"description":1219,"authors":1225,"heroImage":1220,"date":1227,"body":1228,"category":721,"tags":1229},"What tracking down missing TCP Keepalives taught me about Docker, Golang, and GitLab",[1226],"Stan Hu","2019-11-15","This blog post was originally published on the GitLab Unfiltered\nblog. 
It was reviewed and republished on\n2019-12-03.\n\n{: .alert .alert-info .note}\n\n\nWhat began as failure in a GitLab static analysis check led to a\n\ndizzying investigation that uncovered a subtle [bug in the Docker client\n\nlibrary code](https://github.com/docker/for-linux/issues/853) used by\n\nthe GitLab Runner. We ultimately worked around the problem by upgrading\n\nthe Go compiler, but in the process we uncovered an unexpected change in\n\nthe Go TCP keepalive defaults that fixed an issue with Docker and GitLab\n\nCI.\n\n\nThis investigation started on October 23, when backend engineer [Luke\n\nDuncalfe](/company/team/#.luke) mentioned, \"I'm seeing\n\n[`static-analysis` failures with no\noutput](https://gitlab.com/gitlab-org/gitlab/-/jobs/331174397).\n\nIs there something wrong with this job?\" He opened [a GitLab\n\nissue](https://gitlab.com/gitlab-org/gitlab/issues/34951) to discuss.\n\n\nWhen Luke ran the static analysis check locally on his laptop, he saw\n\nuseful debugging output when the test failed. For example, an extraneous\n\nnewline would accurately be reported by Rubocop. However, when the same\n\ntest ran in GitLab's automated test infrastructure, the test failed\n\nquietly:\n\n\n![Failed\njob](https://about.gitlab.com/images/blogimages/docker-tcp-keepalive-debug/job-failure.png){:\n.shadow.center}\n\n\nNotice how the job log did not include any clues after the `bin/rake\n\nlint:all` step. This made it difficult to determine whether a real\n\nproblem existed, or whether this was just a flaky test.\n\n\nIn the ensuing days, numerous team members reported the same problem.\n\nNothing kills productivity like silent test failures.\n\n\n## Was something wrong with the test itself?\n\n\nIn the past, we had seen that if that specific test generated enough\n\nerrors, [the output buffer would fill up, and the continuous integration\n\n(CI) job would lock\n\nindefinitely](https://gitlab.com/gitlab-org/gitlab-foss/issues/61432). 
We\n\nthought we had [fixed that issue months\n\nago](https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/28402). Upon\n\nfurther review, that fix seemed to eliminate any chance of a thread\n\ndeadlock.\n\n\nDid we have to flush the buffer? No, because the Linux kernel will do\n\nthat for an exiting process already.\n\n\n## Was there a change in how CI logs were handled?\n\n\nWhen a test runs in GitLab CI, the [GitLab\n\nRunner](https://gitlab.com/gitlab-org/gitlab-runner/) launches a Docker\n\ncontainer that runs commands specified by a `.gitlab-ci.yml` inside the\n\nproject repository. As the job runs, the runner streams the output to\n\nthe GitLab API via PATCH requests. The GitLab backend saves this data\n\ninto a file. The following sequence diagram shows how this works:\n\n\n```plantuml\n\n== Get a job! ==\n\nRunner -> GitLab: POST /api/v4/jobs/request\n\nGitLab -> Runner: 201 Job was scheduled\n\n\n== Job sends logs (1 of 2) ==\n\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\n\nGitLab -> File: Save to disk\n\nGitLab -> Runner: 202 Accepted\n\n\n== Job sends logs (2 of 2) ==\n\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\n\nGitLab -> File: Save to disk\n\nGitLab -> Runner: 202 Accepted\n\n```\n\n\n[Henrich Lee Yu](/company/team/#engwan) mentioned\n\nthat we had recently [disabled a feature flag that changed how GitLab\n\nhandled CI job\n\nlogs](https://docs.gitlab.com/ee/administration/job_logs.html#new-incremental-logging-architecture).\n[The\n\ntiming seemed to line\n\nup](https://gitlab.com/gitlab-org/gitlab/issues/34951#note_236723888).\n\n\nThis feature, called live CI traces, eliminates the need for a shared\n\nPOSIX filesystem (e.g., NFS) when saving job logs to disk by:\n\n\n1. Streaming data into memory via Redis\n\n2. Persisting the data in the database (PostgreSQL)\n\n3. 
Archiving the final data into object storage\n\n\nWhen this flag is enabled, the flow of CI job logs looks something like\n\nthe following:\n\n\n```plantuml\n\n== Get a job! ==\n\nRunner -> GitLab: POST /api/v4/jobs/request\n\nGitLab -> Runner: 201 Job was scheduled\n\n\n== Job sends logs ==\n\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\n\nGitLab -> Redis: Save chunk\n\nGitLab -> Runner: 202 Accepted\n\n...\n\n== Copy 128 KB chunks from Redis to database ==\n\nGitLab -> Redis: GET gitlab:ci:trace:id:chunks:0\n\nGitLab -> PostgreSQL: INSERT INTO ci_build_trace_chunks\n\n...\n\n== Job finishes ==\n\n\nRunner -> GitLab: PUT /api/v4/job/:id\n\nGitLab -> Runner: 200 Job was updated\n\n\n== Archive trace to object storage ==\n\n```\n\n\nLooking at the flow diagram above, we see that this approach has more\n\nsteps. After receiving data from the runner, something could have gone\n\nwrong with handling a chunk of data. However, we still had many\n\nquestions:\n\n\n1. Did the runners send the right data in the first place?\n\n1. Did GitLab drop a chunk of data somewhere?\n\n1. Did this new feature actually have anything to do with the problem?\n\n1. Are they really making another Gremlins movie?\n\n\n## Reproducing the bug: Simplify the `.gitlab-ci.yml`\n\n\nTo help answer those questions, we simplified the `.gitlab-ci.yml` to\n\nrun only the `static-analysis` step. We inserted a known Rubocop error,\n\nreplacing a `eq` with `eql`. We first ran this test on a separate GitLab\n\ninstance with a private runner. No luck there – the job showed the right\n\noutput:\n\n\n```\n\nOffenses:\n\n\nee/spec/models/project_spec.rb:55:42: C: RSpec/BeEql: Prefer be over eql.\n        expect(described_class.count).to eql(2)\n                                         ^^^\n\n12669 files inspected, 1 offense detected\n\n```\n\n\nHowever, we repeated the test on our staging server and found that we\n\nreproduced the original problem. 
In addition, the live CI trace feature\n\nflag had been activated on staging. Since the problem occurred with and\n\nwithout the feature, we could eliminate that feature as a possible\n\ncause.\n\n\nPerhaps something with the GitLab server environment caused a\n\nproblem. For example, could the load balancers be rate-limiting the\n\nrunners? As an experiment, we pointed a private runner at the staging\n\nserver and re-ran the test. This time, it succeeded: the output was\n\nshown. That seemed to suggest that the problem had more to do with the\n\nrunner than with the server.\n\n\n## Docker Machine vs. Docker\n\n\nOne key difference between the two tests: One runner used a shared,\n\nautoscaled runner using a [Docker\n\nMachine](https://docs.docker.com/machine/overview/) executor, and the\n\nprivate runner used a [Docker\n\nexecutor](https://docs.gitlab.com/runner/executors/docker.html).\n\n\nWhat does Docker Machine do exactly? The following diagram may help\n\nillustrate:\n\n\n![Docker Machine](https://docs.docker.com/machine/img/machine.png){:\n.medium.center}\n\n\nThe top-left shows a local Docker instance. When you run Docker from the\n\ncommand-line interface (e.g., `docker attach my-container`), the program\n\njust makes [REST calls to the Docker Engine\n\nAPI](https://docs.docker.com/engine/api/v1.40/).\n\n\nThe rest of the diagram shows how Docker Machine fits into the\n\npicture. Docker Machine is an entirely separate program. The GitLab\n\nRunner shells out to `docker-machine` to create and destroy virtual\n\nmachines using cloud-specific (e.g. Amazon, Google, etc.) drivers. Once\n\na machine is running, the runner then uses the Docker Engine API to run,\n\nwatch, and stop containers.\n\n\nNote that this API is used securely over an HTTPS connection. 
This is an\n\nimportant difference between the Docker Machine executor and Docker\n\nexecutor: The former needs to communicate across the network, while the\n\nlatter can either use a local TCP socket or UNIX domain socket.\n\n\n## Google Cloud Platform timeouts\n\n\nWe've known for a while that Google Cloud [has a 10-minute idle\n\ntimeout](https://cloud.google.com/compute/docs/troubleshooting/general-tips),\n\nwhich has caused issues in the past:\n\n\n> Note that idle connections are tracked for a maximum of 10 minutes,\n\n> after which their traffic is subject to firewall rules, including the\n\n> implied deny ingress rule. If your instance initiates or accepts\n\n> long-lived connections with an external host, you should adjust TCP\n\n> keep-alive settings on your Compute Engine instances to less than 600\n\n> seconds to ensure that connections are refreshed before the timeout\n\n> occurs.\n\n\nWas the problem caused by this timeout? With the Docker Machine\n\nexecutor, we found that we could reproduce the problem with a simple\n\n`.gitlab-ci.yml`:\n\n\n```yaml\n\nimage: \"busybox:latest\"\n\n\ntest:\n  script:\n    - date\n    - sleep 601\n    - echo \"Hello world!\"\n    - date\n    - exit 1\n```\n\n\nThis would reproduce the failure, where we would never see the `Hello\n\nworld!` output. Changing the `sleep 601` to `sleep 599` would make the\n\nproblem go away. Hurrah! All we have to do is tweak the system TCP\n\nkeepalives, right? Google provided these sensible settings:\n\n\n```sh\n\nsudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60\nnet.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5\n\n```\n\n\nHowever, enabling these kernel-level settings didn't solve the\n\nproblem. Were keepalives even being sent? 
Or was there some other issue?\n\nWe turned our attention to network traces.\n\n\n## Eavesdropping on Docker traffic\n\n\nIn order to understand what was happening, we needed to be able to\n\nmonitor the network communication between the runner and the Docker\n\ncontainer. But how exactly does the GitLab Runner stream data from a\n\nDocker container to the GitLab server?  The following diagram\n\nillustrates the flow:\n\n\n```plantuml\n\nRunner -> Docker: POST /containers/name/attach\n\nDocker -> Runner: \u003Ccontainer output>\n\nDocker -> Runner: \u003Ccontainer output>\n\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\n\nGitLab -> File: Save to disk\n\nGitLab -> Runner: 202 Accepted\n\n```\n\n\nFirst, the runner makes a [POST request to attach to the container\n\noutput](https://docs.docker.com/engine/api/v1.40/#operation/ContainerAttach).\n\nAs soon as a process running in the container outputs some data, Docker\n\nwill transmit the data over this HTTPS stream. The runner then copies\n\nthis data to GitLab via the PATCH request.\n\n\nHowever, as mentioned earlier, traffic between a GitLab Runner and the\n\nremote Docker machine is encrypted over HTTPS on port 2376. Was there an\n\neasy way to disable HTTPS? Searching through the code of Docker Machine,\n\nwe found that it did not appear to be supported out of the box.\n\n\nSince we couldn't disable HTTPS, we had two ways to eavesdrop:\n\n\n1. Use a man-in-the-middle proxy (e.g. [mitmproxy](https://mitmproxy.org/))\n\n1. 
Record the traffic and decrypt the traffic later using the private keys\n\n\n## Ok, let's be the man-in-the-middle!\n\n\nThe first seemed more straightforward, since [we already had experience\n\ndoing this with the Docker\n\nclient](https://docs.gitlab.com/ee/administration/packages/container_registry.html#running-the-docker-daemon-with-a-proxy).\n\n\nHowever, after [defining the proxy variables for GitLab\n\nRunner](https://docs.gitlab.com/runner/configuration/proxy.html#adding-proxy-variables-to-the-runner-config),\n\nwe found we were only able to intercept the GitLab API calls with\n\n`mitmproxy`. The Docker API calls still went directly to the remote\n\nhost. Something wasn't obeying the proxy configuration, but we didn't\n\ninvestigate further. We tried the second approach.\n\n\n## Decrypting TLS data\n\n\nTo decrypt TLS data, we would need to obtain the encryption keys. Where\n\nwere these located for a newly-created system with `docker-machine`? It\n\nturns out `docker-machine` worked in the following way:\n\n\n1. Call the Google Cloud API to create a new machine\n\n1. Create a `/root/.docker/machine/machines/:machine_name` directory\n\n1. Generate a new SSH keypair\n\n1. Install the SSH key on the server\n\n1. Generate a new TLS certificate and key\n\n1. Install and configure Docker on the newly-created machine with TLS\ncertificates\n\n\nAs long as the machine runs, the directory will contain the information\n\nneeded to decode this traffic. We ran `tcpdump` and saved the private keys.\n\n\nOur first attempt at decoding the traffic failed. Wireshark could not\n\ndecode the encrypted traffic, although general TCP traffic could still\n\nbe seen. Researching more, we found out why: If the encrypted traffic\n\nused a [Diffie-Hellman key\n\nexchange](https://en.wikipedia.org/wiki/Diffie%E2%80%93Hellman_key_exchange),\n\nhaving the private keys would not suffice! 
This is by design, a property\n\ncalled [perfect forward\n\nsecrecy](https://en.m.wikipedia.org/wiki/Forward_secrecy).\n\n\nTo get around that limitation, we modified the GitLab Runner to disable\n\ncipher suites that used the Diffie-Hellman key exchange:\n\n\n```diff\n\ndiff --git\na/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go\nb/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go\n\nindex 6b4c6a7c0..a3f86d756 100644\n",[269,1230,550,745,1231,745,9,887,699],"git","google",{"slug":1233,"featured":6,"template":703},"tracking-down-missing-tcp-keepalives","content:en-us:blog:tracking-down-missing-tcp-keepalives.yml","Tracking Down Missing Tcp Keepalives","en-us/blog/tracking-down-missing-tcp-keepalives.yml","en-us/blog/tracking-down-missing-tcp-keepalives",{"_path":1239,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1240,"content":1245,"config":1251,"_id":1253,"_type":14,"title":1254,"_source":16,"_file":1255,"_stem":1256,"_extension":19},"/en-us/blog/tuto-mac-m1-gitlab-ci",{"title":1241,"description":1242,"ogTitle":1241,"ogDescription":1242,"noIndex":6,"ogImage":1182,"ogUrl":1243,"ogSiteName":686,"ogType":687,"canonicalUrls":1243,"schema":1244},"How to use Scaleway to self-host your GitLab Runners","Learn how to set up GitLab CI for your iOS and macOS projects using a hosted Mac mini M1.","https://about.gitlab.com/blog/tuto-mac-m1-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Scaleway to self-host your GitLab Runners\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Benedikt Rollik\"}],\n        \"datePublished\": \"2021-06-07\",\n      }",{"title":1241,"description":1242,"authors":1246,"heroImage":1182,"date":1248,"body":1249,"category":721,"tags":1250},[1247],"Benedikt Rollik","2021-06-07","GitLab's complete DevOps platform comes with built-in continuous integration\n(CI) and continuous 
delivery (CD) via [GitLab\nCI/CD](https://docs.gitlab.com/ee/ci/). GitLab CI/CD is a great solution to\nincrease developer productivity and motivation to write higher-quality code\nwithout sacrificing speed. It runs a series of tests every time a commit is\npushed, providing immediate visibility into the results of changes in the\ncodebase. While it is not a hassle to set up a CI using Linux-based\nmachines, iOS and macOS developers may find it is more complicated to have\naccess to a Mac that is connected and available 24 hours a day.\n\n\nGitLab Runners, provided on GitLab.com, are the engine that executes CI\nworkflows. Due to various requirements, some users may opt to self-host\nrunners on public cloud VMs. This is super easy if the build VM OS\nrequirement is Linux-based since there are several low-cost public cloud\nLinux-based VM solutions. However, iOS and macOS developers may find fewer\noptions for public cloud-delivered macOS based systems.\n\n\nIn this blog post tutorial, you will learn how to set up CI for iOS and\nmacOS application development using a Scaleway Virtual Instance running the\n[GitLab\napplication](https://www.scaleway.com/en/docs/install-gitlab-with-dbaas/)\nand a GitLab Runner that runs on a Scaleway-hosted [Mac mini\nM1](https://www.scaleway.com/en/hello-m1/). To complete this tutorial most\nsuccessfully, we assume that you have some experience creating Xcode and\nGitLab projects, as well as some experiences using a Terminal and git.\n\n\n> **Requirements**\n\n>\n\n- You have an account and are logged into\n[console.scaleway.com](https://console.scaleway.com)\n\n- You have [configured your SSH\nKey](https://www.scaleway.com/en/docs/configure-new-ssh-key/)\n\n- You have a Virtual Instance running the GitLab InstantApp\n\n- **Note:** We assume you have already deployed a Virtual Instance running\nthe GitLab InstantApp. 
If not, [deploy\nGitLab](https://www.scaleway.com/en/docs/install-gitlab-with-dbaas/) before\ncontinuing with this tutorial.\n\n\n### Deploying the Mac mini M1\n\n\n1. Log into your [Scaleway console](https://console.scaleway.com) and click\non **Apple silicon** in the **Compute** section of the sidebar.\n\n   ![Orga_dashboard](https://about.gitlab.com/images/blogimages/scaleway-blog/Orga_dashboard.png){: .shadow.medium}\n   Click on the \"Apple silicon\" in the Scaleway console.\n   {: .note.text-center}\n\n1. The Apple silicon M1 as-a-Service splash screen displays. Click **Create\na Mac mini M1**.\n\n1. Enter the details for your Mac mini M1:\n\n   - Select the geographic region in which your Mac mini M1 will be deployed.\n   - Choose the macOS version you want to run on the Mac mini M1.\n   - Select the hardware configuration for your Mac mini M1.\n   - Enter a name for your Mac mini M1.\n\n1. Click **Create a Mac mini M1** to launch the installation of your Apple\nsilicon M1 as-a-Service.\n\n   ![M1_creation](https://about.gitlab.com/images/blogimages/scaleway-blog/M1_creation.png){: .shadow.medium}\n   Click \"Create a Mac mini M1\" to launch.\n   {: .note.text-center}\n\n1. Once deployed click **VNC** from the Mac mini M1 Overview page to launch\nthe remote desktop client.\n\n\n1. Launch the **App Store** and install the **Xcode development\nenvironment** on your Mac mini M1.\n\n\n### Setting-up the Homebrew package manager\n\n\n[Homebrew](https://brew.sh/) is a package manager for macOS. It can be used\nto manage the software installed on your Mac. We use it to install\n`gitlab-runner` on your Mac mini M1.\n\n\n1. Click on the Terminal icon to open a new **Terminal**.\n\n\n1. 
Copy-paste the following code in the terminal application and press\n**Enter** to install Homebrew and the Xcode command line tools:\n\n   ```sh\n   /bin/bash -c \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\"\n   ```\n\nLeave the terminal window open since it is required for the next step.\n\n\n#### Installing the GitLab Runner\n\n\nThe GitLab Runner is an application installed on a different computer than\nyour GitLab host and runs jobs in a pipeline. It executes the build task on\nyour Mac mini M1 for the code you push to your GitLab instance.\n\n\n1. Make sure you are still in the **Terminal** application. If you closed it\nafter installing Homebrew, open a new one.\n\n\n1. Run the following command to install `gitlab-runner`:\n\n   ```\n   brew install gitlab-runner\n   ```\n\n### Configuring the Runner in GitLab\n\n   > **Note:** You require a Virtual Instance running the [GitLab InstantApp](https://www.scaleway.com/en/docs/how-to-use-the-gitlab-instant-apps/) for the following steps.\n\n1. GitLab Runner requires a registration token for the link between your\nGitLab Instance and the Runner. Open the GitLab web interface of your\nVirtual Instance and log into it.\n\n\n1. Select the project you want to use in GitLab with the Runner. If you\ndon't have a project yet, click **+** > **Create Project** and fill in the\nrequired information about the project.\n\n\n1. On the projects overview page, click **Settings** > **CI/CD** to view the\nContinuous Integration settings.\n\n\n1. On the Continuous Integration settings page, click **Expand** in the\n**Runner** section to view the required information to link GitLab with your\nRunner.\n\n\n1. Scroll down to retrieve the GitLab Instance URL and the registration\ntoken.\n\n\n1. 
Run the following command in the Terminal application on your Mac to\nlaunch the configuration wizard for your GitLab Runner:\n\n   ```\n   gitlab-runner register\n   ```\n\n   Enter the required information as follows:\n\n   ```\n   Runtime platform                                    arch=arm64 os=darwin pid=810 revision=2ebc4dc4 version=13.9.0\n   WARNING: Running in user-mode.\n   WARNING: Use sudo for system-mode:\n   WARNING: $ sudo gitlab-runner...\n\n   Enter the GitLab instance URL (for example, https://gitlab.com/):\n   http://163.172.141.212/   \u003C- Enter the URL of your GitLab instance\n   Enter the registration token:\n   1mWBwzWAZSL7-pR18K3Y  \u003C- Enter the registration token for your Runner\n   Enter a description for the runner:\n   [306a20a2-2e01-4f2e-bc76-a004d35d9962]: Mac mini M1  \u003C- Enter a description for your Runner\n   Enter tags for the runner (comma-separated):\n   Mac, mini, M1, dev, xcode  \u003C- Optionally, enter tags for the runner\n   Registering runner... succeeded                     runner=1mWBwzWA\n   Enter an executor: shell, virtualbox, docker+machine, custom, docker, docker-ssh, kubernetes, parallels, ssh, docker-ssh+machine:\n   shell  \u003C- Enter the \"shell\" executor for the runner\n   Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\n   ```\n\n1. Reload the CI/CD configuration page of your GitLab instance. The runner\nis now linked to your project and displays as available.\n\n   > **Note:** If you have several projects in a GitLab group, you can configure the Runner at the group-level. Runners available at the group-level are available for all projects within said group.\n\n### Configuring CI for your project\n\n\nGitLab stores the configuration of the CI in a file called `.gitlab-ci.yml`.\nThis file should be in the folder you created for your project. 
Typically\nthis is the same directory where your Xcode project file\n(`ProjectName.xcodeproj`) is located. The GitLab CI configuration file is\nwritten in [YAML](https://yaml.org/).\n\n\nInside the configuration file you can specify information like:\n\n\n* The scripts you want to run.\n\n* Other configuration files and templates you want to include.\n\n* Dependencies and caches.\n\n* The commands you want to run in sequence and those you want to run in\nparallel.\n\n* The location to deploy your application to.\n\n* Whether you want to run the scripts automatically or trigger any of them\nmanually.\n\n\n1. Open a text editor on your local computer and create the `.gitlab-ci.yml`\nfile as in the following example.\n\n   ```\n   stages:\n     - build\n     - test\n\n   build-code-job:\n     stage: build\n     script:\n       - echo \"Check the ruby version, then build some Ruby project files:\"\n       - ruby -v\n       - rake\n\n   test-code-job1:\n     stage: test\n     script:\n       - echo \"If the files are built successfully, test some files with one command:\"\n       - rake test1\n   ```\n\n1. Save the file and make a new commit to add it to your repository.\n\n\n1. Push the commit to GitLab. The CI will automatically launch the tasks on\nyour Runner.\n\n\nFor more information on the GitLab CI configuration file, refer to the\n[official\ndocumentation](https://docs.gitlab.com/ee/ci/yaml/gitlab_ci_yaml.html).\n\n\n### Speed up development with Scaleway and GitLab\n\n\nHaving a dedicated Mac available for executing your CI jobs can reduce your\ndevelopment team's cycle time. In this tutorial, we covered configuring a\ndedicated Mac mini M1 to host a GitLab Runner. 
If you want to learn more\nabout the Mac mini M1 as-a-Service, refer to our [product\ndocumentation](https://www.scaleway.com/en/docs/apple-silicon-as-a-service-quickstart/).\n\nWe invite the GitLab community to start building on Scaleway today with a\n€10 voucher to use on dozens of products & services. Find out more\n[here.](https://www.scaleway.com/en/gitlab-m1/)\n\n\n\u003Chr>\n\n\n_Mac mini, macOS are trademarks of Apple Inc., registered in the U.S. and\nother countries and regions. IOS is a trademark or registered trademark of\nCisco in the U.S. and other countries and is used by Apple under license.\nScaleway is not affiliated with Apple Inc._\n",[1230,9,9],{"slug":1252,"featured":6,"template":703},"tuto-mac-m1-gitlab-ci","content:en-us:blog:tuto-mac-m1-gitlab-ci.yml","Tuto Mac M1 Gitlab Ci","en-us/blog/tuto-mac-m1-gitlab-ci.yml","en-us/blog/tuto-mac-m1-gitlab-ci",{"_path":1258,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1259,"content":1265,"config":1273,"_id":1275,"_type":14,"title":1276,"_source":16,"_file":1277,"_stem":1278,"_extension":19},"/en-us/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab",{"title":1260,"description":1261,"ogTitle":1260,"ogDescription":1261,"noIndex":6,"ogImage":1262,"ogUrl":1263,"ogSiteName":686,"ogType":687,"canonicalUrls":1263,"schema":1264},"Ultimate guide to migrating from AWS CodeCommit to GitLab","Learn how to migrate from AWS Services to GitLab and seamlessly integrate with the DevSecOps platform in this comprehensive tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097810/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2828%29_4mi0l4wzUa5VI4wtf8gInx_1750097810027.png","https://about.gitlab.com/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ultimate guide to migrating from AWS CodeCommit to GitLab\",\n     
   \"author\": [{\"@type\":\"Person\",\"name\":\"Tsukasa Komatsubara\"},{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"},{\"@type\":\"Person\",\"name\":\"Samer Akkoub\"},{\"@type\":\"Person\",\"name\":\"Bart Zhang\"}],\n        \"datePublished\": \"2024-08-26\",\n      }",{"title":1260,"description":1261,"authors":1266,"heroImage":1262,"date":1270,"body":1271,"category":698,"tags":1272},[1267,718,1268,1269],"Tsukasa Komatsubara","Samer Akkoub","Bart Zhang","2024-08-26","On July 25, 2024, AWS made a significant announcement regarding its CodeCommit service. As detailed in their [official blog post](https://aws.amazon.com/blogs/devops/how-to-migrate-your-aws-codecommit-repository-to-another-git-provider/), AWS has decided to close new customer access to CodeCommit. While existing customers can continue using the service, AWS will not introduce new features, focusing only on security, availability, and performance improvements.\n\nThis announcement has prompted development teams to consider migrating their repositories to alternative Git providers. In light of these changes, we've prepared this comprehensive guide to assist teams in migrating to GitLab and integrating with other AWS services.\n\n**Note:** For more details on AWS's official migration recommendations, please refer to [their blog post](https://aws.amazon.com/blogs/devops/how-to-migrate-your-aws-codecommit-repository-to-another-git-provider/).\n\n## About this guide\n\nThis guide provides comprehensive information for development teams using GitLab who are considering integration with AWS services or planning to migrate from AWS-hosted Git repositories to GitLab.com. 
The guide is structured into three main sections:\n\n- [Parallel migration to GitLab](#section-1-parallel-migration-to-gitlab): Explains how to gradually migrate from existing AWS-hosted repositories to GitLab.com while minimizing risks.\n\n- [Integration with AWS CodeBuild](#section-2-integrating-gitlab-with-aws-codebuild): Provides steps to integrate GitLab repositories with AWS CodeBuild, setting up a powerful continuous integration (CI) environment.\n\n- [Integration with AWS CodePipeline](#section-3-integrating-gitlab-with-aws-codepipeline): Details how to connect GitLab repositories with AWS CodePipeline to build efficient continuous delivery (CD) pipelines.\n\n- [Downstream integrations for CodePipeline and CodeStar Connections](#section-4-migrating-to-gitlab): Explains how to leverage GitLab-AWS connections for widespread service access, unlocking a cascade of integration possibilities across the AWS ecosystem.\n\nThrough this guide, you'll learn how to combine the powerful features of GitLab and AWS to create an efficient and flexible development workflow.\n\n## Section 1: Parallel migration to GitLab \n\nFor those considering migrating Git repositories hosted on AWS to GitLab.com, this section, which is a phased approach, introduces methods to achieve migration while minimizing risks. By leveraging GitLab's mirroring capabilities, you can maintain existing development flows while testing the new environment.\n\n### Why is parallel migration important?\n\nLarge-scale system migrations always involve risks, particularly potential impacts on ongoing development work, existing integrations, and automated processes. Adopting a parallel migration approach offers the following benefits:\n\n1. Risk minimization: Test the new environment while keeping existing systems operational.\n2. Seamless transition: Development teams can gradually acclimate to the new system.\n3. Integration testing: Thoroughly test all integrations and automation in the new environment.\n4. 
Future-proofing: Enable teams to gradually migrate to GitLab CI/CD in parallel to existing CI.\n\nParallel migration is not required if it is already known that you want to cut over directly to GitLab.\n\n### Steps for migrating to GitLab.com\n\n#### Step 1: Get set up on GitLab.com\n\n- Check if your company already has a group in use on GitLab.com and whether they have single sign-on (SSO) set up – if they do, then you will want to use both.\n\n- If your company does not have a presence on GitLab.com, visit [GitLab.com](www.gitlab.com) and create a new account or log in to an existing one.\n- Create a new company namespace (a group at the root level of gitlab.com).\n- Pick a name that reflects your entire company (and is not already taken).\n\n#### Step 2: Import repository\nFor parallel migration: Use GitLab's pull mirroring feature to automatically sync changes from AWS-hosted repositories to GitLab.com.\n\n1. Navigate to the target group GitLab.com.\n2. In the upper right, click \"New project.\"\n3. On the \"Create new project\" page, click \"Import project.\"\n4. On the \"Import project\" page, click \"Repository by URL.\"\n5. Enter the URL of your AWS-hosted repository in the \"Git repository URL\" field.\n6. Underneath the Git repository URL field, check \"Mirror repository.\"\n7. Set up authentication: in the AWS CodeCommit console, select the clone URL for the repository you will migrate. If you plan on importing CodeCommit repositories into GitLab, you can use the HTTPS CodeCommit URL to clone the repository via GitLab Repository Mirroring. You will need to also provide your Git credentials from AWS for your identity and access management (IAM) user within GitLab. 
You can create Git credentials for AWS CodeCommit by following this [AWS guide](https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-gc.html).\n\n![Clone URL](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/clone-url-screenshot__1__aHR0cHM6_1750097822121.png)\n\nThis setup will automatically pull changes from the AWS-hosted repository to GitLab.com every five minutes by default.\n\nFor more information, read our [repository mirroring documentation](https://docs.gitlab.com/ee/user/project/repository/mirror/).\n\n#### Step 3: Test and validate integrations\n\n1. CI/CD pipelines: Set up the `.gitlab-ci.yml` file in GitLab CI to replicate existing pipelines. You can read more about [planning a migration from other CI tools into GitLab CI/CD](https://docs.gitlab.com/ee/ci/migration/plan_a_migration.html).\n2. Issue tracking: Import project issues and test workflows.\n3. Code review: Set up the merge request process and test review workflows.\n\n#### Step 4: Gradual migration\n\n1. Start with small or non-critical projects to familiarize yourself with working on GitLab.com.\n2. Provide training for team members and allow time to adapt to new workflows.\n3. Gradually migrate more projects while ensuring integrations and workflows are problem-free.\n\nFor more information, see [Automating Migrations from CodeCommit to GitLab](https://gitlab.com/guided-explorations/aws/migrating-from-codecommit-to-gitlab/-/blob/main/migrating_codecommit_to_gitlab.md).\n\n#### Step 5: Complete migration\nOnce all tests and validations are complete and the team is comfortable with the new environment, plan for full migration. For each project:\n\n1. Set a migration date and notify all stakeholders.\n2. Perform final data synchronization.\n3. Remove mirroring settings from the GitLab project.\n4. 
Set AWS-hosted repositories to read-only and transition all development work to GitLab.com.\n\n#### Step 6: Assess adoption of new capabilities\n\nGitLab collaboration and workflow automation for developers is far richer than CodeCommit. It merits some time to learn what these capabilities are. The merge request process is especially rich compared to CodeCommit.\n\nAfter repositories are stable on GitLab, it is very easy to experiment with GitLab CI/CD in parallel to an existing solution. Teams can take time to perfect their GitLab CI/CD automation while production workflows remain unaffected.\n\nGitLab artifact management is also very capable with the Releases feature and many package registries.\n\n### Section 1: Summary\nBy adopting a parallel migration approach to GitLab, you can achieve a smooth transition while minimizing risks. This process allows teams to gradually adapt to the new environment and ensure all integrations and automations function correctly. Cutover migrations only omit a single setting checkbox if it is known that a parallel migration is not necessary.\n\n## Section 2: Integrating GitLab with AWS CodeBuild\n\nFor those wanting to build and test code from GitLab repositories using AWS CodeBuild, this comprehensive guide will help you set up an efficient CI pipeline.\n\n### Prerequisites\n\n- GitLab.com account\n- AWS account\n- AWS CLI (configured)\n\n### Step 1: Create GitLab connection in AWS CodeStar Connections\n\n1. Log in to the AWS Management Console and navigate to the CodeBuild service.\n2. Select \"Settings\" > \"Connections\" from the left navigation panel.\n3. Click the \"Create connection\" button.\n4. Choose \"GitLab\" as the provider.\n5. Enter a connection name and click \"Connect to GitLab.\"\n6. You'll be redirected to the GitLab authentication page.\n7. Approve the necessary permissions.\n8. 
Once successful, the connection status will change to \"Available.\"\n\n![CodeStar Connect setup](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codestar-connections-setup_aHR0cHM6_1750097822122.png)\n\n### Step 2: Create AWS CodeBuild project\n\n1. Click \"Create build project\" on the CodeBuild dashboard.\n2. Enter a project name and description.\n3. For source settings, select \"GitLab\" as the provider.\n4. Choose the connection you just created and specify the GitLab repository and branch.\n\n![Add CodeBuild project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codepipeline_step_3_add_codebuild_aHR0cHM6_1750097822123.png)\n\n**Note: From Step 3 forward, please configure the settings according to your specific environment and needs.**\n\n### Summary of Section 2\nThis section explained in detail how to integrate GitLab repositories with AWS CodeBuild. This setup enables a continuous integration pipeline where code changes in GitLab are automatically built and tested using AWS CodeBuild.\n\n## Section 3: Integrating GitLab with AWS CodePipeline\n\nFor those looking to implement continuous delivery from GitLab repositories using AWS CodePipeline, this detailed guide will be helpful. The integration has become even easier now that GitLab is available as an AWS CodeStar Connections provider.\n\n### Prerequisites\n\n- GitLab.com account\n- AWS account\n- AWS CLI (configured)\n\n### Step 1: Create GitLab connection in AWS CodeStar Connections\n\n1. Log in to the AWS Management Console and navigate to the CodePipeline service.\n2. Select \"Settings\" > \"Connections\" from the left navigation panel.\n3. Click the \"Create connection\" button.\n4. Choose \"GitLab\" as the provider.\n5. Enter a connection name and click \"Connect to GitLab.\"\n6. You'll be redirected to the GitLab authentication page.\n7. 
Approve the necessary permissions.\n8. Once successful, the connection status will change to \"Available.\"\n\n![CodeStar Connections setup](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codestar-connections-setup_aHR0cHM6_1750097822125.png)\n\n### Step 2: Create AWS CodePipeline\n\n1. Click \"Create pipeline\" on the CodePipeline dashboard.\n2. Enter a pipeline name and click \"Next.\"\n3. Select \"GitLab\" as the source provider.\n4. Choose the connection you just created and specify the GitLab repository and branch.\n5. Select the Trigger type: You can trigger CodePipeline pipeline execution based on either pull or push events against specific branches and file types within your repository.\n\n![Add source provider](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codepipeline_step_2_source_provider_aHR0cHM6_1750097822127.png)\n\n![Add source configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codepipeline_step_2_source_configured_aHR0cHM6_1750097822129.png)\n\n**Note: From Step 3 forward, please configure the settings according to your specific environment and needs.**\n\n### Summary of Section 3\nThis section detailed how to integrate GitLab repositories with AWS CodePipeline. This setup enables a continuous delivery pipeline where code changes in GitLab are automatically deployed to your AWS environment.\n\n## Section 4: Migrating to GitLab\n\nIntegrating GitLab with AWS unlocks powerful capabilities for streamlining your development and deployment workflows and helps to solve your source code management woes. 
This integration can be achieved in several ways, each offering unique benefits:\n\n- Using AWS CodeStar Connections to link GitLab with AWS services enables a more cohesive workflow by allowing external Git repositories, like GitLab, to connect with various AWS services. This setup supports automated builds, deployments, and other essential actions directly from your GitLab repository, making your development process more integrated and streamlined.\n\n- Connecting GitLab with AWS CodePipeline via AWS CodeStar Connections takes automation to the next level by allowing you to create a full CI/CD pipeline. This approach integrates GitLab with AWS CodePipeline, enabling you to automate the entire process – from source control and builds to testing and deployment – using AWS services like CodeBuild and CodeDeploy. This ensures a robust, scalable, and efficient delivery process.\n\n![Chart of new technology and solutions for using GitLab and AWS together](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/Announcing_New_Technology_and_Solutions_for_using_GitLab_and_AWS_Together_aHR0cHM6_1750097822130.png)\n\n1\\. Connecting GitLab with AWS services using AWS CodeStar Connections\n\nAWS CodeStar Connections is a service that allows you to connect external Git repositories (such as GitHub or Bitbucket) to AWS services. You can also connect GitLab to AWS services via CodeStar Connections. When using GitLab, you may need to set up a custom connection as an HTTP Git server.\nThe following AWS services can be connected to GitLab using this method:\n\n- **AWS Service Catalog**\n\nAWS Service Catalog helps organizations standardize and manage AWS resources. Integrating it with GitLab improves transparency in resource management and simplifies change tracking. 
Specifically, you can automate catalog updates based on GitLab commits, enhancing operational efficiency.\n\n- __AWS CodeBuild__\n\nAWS CodeBuild is a managed build service that compiles source code, runs tests, and produces deployable software packages. Integrating GitLab with CodeBuild allows automated build processes to start whenever code changes are pushed to GitLab. This ensures consistency in builds and facilitates easier collaboration and version control.\n\n- __AWS Glue Notebook Jobs__\n\nAWS Glue Notebook Jobs is a service that allows you to interactively develop and run data preparation and ETL (Extract, Transform, Load) tasks. Integrating GitLab with Glue Notebook Jobs enables version control for notebooks and ETL scripts, promotes collaboration among team members, and improves the quality management of data processing pipelines.\n\n- __AWS Proton__\n\nAWS Proton is a service that automates the development and deployment of microservices and serverless applications. By integrating GitLab with AWS Proton, you can manage infrastructure as code, automate deployments, and ensure consistent environment management, leading to more efficient development processes.\n\nAs AWS CodeStar Connections supports more services, connecting GitLab with additional AWS services will become easier. It's advisable to regularly check for new services that support CodeStar Connections.\n\n2. Connecting CodePipeline with GitLab via AWS CodeStar Connections (including CodeDeploy)\n\nAWS CodePipeline is a continuous delivery service that automates the release process for software. To connect GitLab with CodePipeline, you need to use AWS CodeStar Connections. 
This setup allows you to designate a GitLab repository as the source and automate the entire CI/CD pipeline.\nThe primary actions supported by CodePipeline include:\n- **Source control:** AWS CodeCommit, GitHub, Bitbucket, GitLab\n- **Build and test:** AWS CodeBuild, Jenkins\n- **Deploy:** AWS CodeDeploy, Elastic Beanstalk, ECS, S3\n- **Approval:** Manual approval\n- **Infrastructure management:** AWS CloudFormation\n- **Serverless:** AWS Lambda\n- **Testing:** AWS Device Farm\n- **Custom Actions:** AWS Step Functions\n\nBy integrating GitLab with CodePipeline, you can automatically trigger the pipeline whenever code changes are pushed to GitLab, allowing a consistent process from build to deployment. Additionally, combining this with GitLab's version control capabilities makes it easier to track deployment history and states, leading to more flexible and reliable software delivery.\n\n## What you've learned\nThis guide has provided comprehensive information on migrating to and integrating GitLab with AWS. 
Through the four main topics, we've covered:\n- Parallel migration to GitLab: How to gradually migrate from existing AWS-hosted repositories to GitLab.com while minimizing risks.\n- Integration with AWS CodeBuild: Steps to set up a powerful CI environment integrated with GitLab repositories.\n- Integration with AWS CodePipeline: How to build efficient continuous delivery pipelines using GitLab repositories.\n- Downstream integrations for CodePipeline and CodeStar Connections: Leveraging GitLab-AWS connections for widespread service access, unlocking a cascade of integration possibilities across the AWS ecosystem.\n\nAs every organization's code hosting and integration implementation strategy is unique, this tutorial may be used as a starting point for your own GitLab + AWS integration and implementation strategy.\n\n## Additional resources\n\nFor more detailed information and advanced configurations, refer to the following resources:\n\n- [GitLab documentation](https://docs.gitlab.com/)\n- [AWS CodeBuild User Guide](https://docs.aws.amazon.com/codebuild/latest/userguide/welcome.html)\n- [AWS CodePipeline User Guide](https://docs.aws.amazon.com/codepipeline/latest/userguide/welcome.html)\n- [GitLab CI/CD documentation](https://docs.gitlab.com/ee/ci/)\n- [Integrate with AWS](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html)\n\nIf you have questions or need support, please contact [GitLab Support](https://about.gitlab.com/support/) or AWS Support. 
We hope this comprehensive guide helps you in your AWS-GitLab integration journey.",[109,9,496,700,767,698,233],{"slug":1274,"featured":91,"template":703},"ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab","content:en-us:blog:ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab.yml","Ultimate Guide To Migrating From Aws Codecommit To Gitlab","en-us/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab.yml","en-us/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab",{"_path":1280,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1281,"content":1287,"config":1294,"_id":1296,"_type":14,"title":1297,"_source":16,"_file":1298,"_stem":1299,"_extension":19},"/en-us/blog/why-i-love-contributing-to-gitlab",{"title":1282,"description":1283,"ogTitle":1282,"ogDescription":1283,"noIndex":6,"ogImage":1284,"ogUrl":1285,"ogSiteName":686,"ogType":687,"canonicalUrls":1285,"schema":1286},"Why I love contributing to GitLab","Making small meaningful changes is what it's all about.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679501/Blog/Hero%20Images/new-feature.png","https://about.gitlab.com/blog/why-i-love-contributing-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why I love contributing to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Austin Regnery\"}],\n        \"datePublished\": \"2021-05-11\",\n      }",{"title":1282,"description":1283,"authors":1288,"heroImage":1284,"date":1290,"body":1291,"category":972,"tags":1292},[1289],"Austin Regnery","2021-05-11","It was mid-morning on a Tuesday in February, and I had 10 minutes in between\nmeetings. So I decided to try and solve a pain point of mine. \n\n\nYou see, I had to memorize this HTML snippet to create a collapsible section\nin GitLab Issue descriptions and comments, but I kept forgetting it. Was it\n`summary` or `section`? 
I could never remember.\n\n\n```html\n\n\u003Cdetails>\n\n\u003Csummary>Insert Title\u003C/summary>\n\nHidden content\n\n\u003C/details>\n\n```\n\n\nEven though it is not vanilla Markdown, GitLab knows how to interpret some\nHTML. I used this formatting trick fairly often since full-page screenshots\ncan occupy a lot of screen space, which leads to excessive scrolling.\n\n\n\nSo I decided to poke around our codebase to see how the other Markdown\nshortcuts worked. To my surprise, it was pretty straightforward. Each\nshortcut had a simple text input that mapped to each button. This\nimplementation was simple to replicate since I just needed to copy/paste and\nreplace a few words.\n\n\n![Image of Vue and Haml files with editor\nshortcuts](https://about.gitlab.com/images/blogimages/why-i-love-contributing-to-gitlab/vue-haml.png){:\n.shadow}\n\n\nThe Vue and Haml files with the new shortcut\n\n{: .note.text-center}\n\n\nI started a branch and began hacking away at the code. Now, I would never\ncall myself a Software Engineer, but I like to try and make things from time\nto time. I was able to add a new shortcut to the toolbar to insert this code\nsnippet for me in less than 10 minutes. No more memorizing! Making\ncontributions like this is what makes working at GitLab so special.\n\n\nNow, it wasn't ready for production, but I at least had something that\nworked. I shared it with my UX colleagues in Slack, and it started to gain\ntraction with several up-votes and few constructive comments on how to make\nit better.\n\n\nWith the functionality flushed out, a few other designers helped me get a\nbetter icon added to our SVG library. 
Using clear iconography is critical\nfor communicating information more clearly.\n\n\n| Initial Icon | Final Icon |\n\n| - | - |\n\n| ![SVG of chevron right\nicon](https://about.gitlab.com/images/blogimages/why-i-love-contributing-to-gitlab/chevron-right.svg)\n| ![SVG of details block\nicon](https://about.gitlab.com/images/blogimages/why-i-love-contributing-to-gitlab/details-block.svg)\n|\n\n\nThe last thing to do was resolve my failing tests, and I had several\nteammates help me do that.\n\n\n![Gif of the shortcut being\nused](https://about.gitlab.com/images/blogimages/why-i-love-contributing-to-gitlab/demo.gif){:\n.shadow}\n\n\nToday [this\nchange](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/54938) merged!\nNow I solved a pain point for me and others. It took a few months to go from\nidea to production, but the effort was super low. I'd say the return on my\ninitial investment, 10 minutes, is super high.\n\n\n> Having a direct impact on a product was never an option for me before\njoining GitLab.\n\n\n![Image of participants in the Merge\nRequest](https://about.gitlab.com/images/blogimages/why-i-love-contributing-to-gitlab/participants.png){:\n.shadow}\n\n\nThank you to everyone that helped me deploy this\n\n{: .note.text-center}\n",[1293,698,9],"UX",{"slug":1295,"featured":6,"template":703},"why-i-love-contributing-to-gitlab","content:en-us:blog:why-i-love-contributing-to-gitlab.yml","Why I Love Contributing To Gitlab","en-us/blog/why-i-love-contributing-to-gitlab.yml","en-us/blog/why-i-love-contributing-to-gitlab",{"_path":1301,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1302,"content":1308,"config":1316,"_id":1318,"_type":14,"title":1319,"_source":16,"_file":1320,"_stem":1321,"_extension":19},"/en-us/blog/write-vulnerability-detection-rules",{"title":1303,"description":1304,"ogTitle":1303,"ogDescription":1304,"noIndex":6,"ogImage":1305,"ogUrl":1306,"ogSiteName":686,"ogType":687,"canonicalUrls":1306,"schema":1307},"How to write and 
continuously test vulnerability detection rules for SAST","Interns with the Google Summer of Code helped GitLab transition from our old SAST tools to Semgrep.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667819/Blog/Hero%20Images/anomaly-detection-cover.png","https://about.gitlab.com/blog/write-vulnerability-detection-rules","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to write and continuously test vulnerability detection rules for SAST\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ross Fuhrman\"},{\"@type\":\"Person\",\"name\":\"Anshuman Singh\"},{\"@type\":\"Person\",\"name\":\"Julian Thome\"}],\n        \"datePublished\": \"2021-09-08\",\n      }",{"title":1303,"description":1304,"authors":1309,"heroImage":1305,"date":1313,"body":1314,"category":929,"tags":1315},[1310,1311,1312],"Ross Fuhrman","Anshuman Singh","Julian Thome","2021-09-08","In summer 2021, the [Vulnerability\nResearch](/handbook/engineering/development/sec/secure/vulnerability-research/)\nand [Static\nAnalysis](/handbook/engineering/development/sec/secure/static-analysis/)\n\nteams launched the [Google Summer of Code\n(GSoC)](https://summerofcode.withgoogle.com/) project: [Write vulnerability\ndetection rules for\nSAST](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/gitlab-gsoc-2021/-/issues/3).\n\n\nFor this project, we built and implemented a framework to help transition\nGitLab away from our current SAST tools over to Semgrep. Semgrep is a\nlanguage-agnostic SAST tool that is gaining popularity in CI/CD\nenvironments.\n\nBefore replacing an analyzer with the corresponding Semgrep configuration\n(called rule-sets), we need to ensure that they are equivalent – in that\nthey yield the same set of findings.\n\n\nFor this purpose, we built a testing framework that helps us assess the\nquality of a Semgrep rule-set. 
This framework has been used to guide the\nreplacement of\n[flawfinder](https://gitlab.com/gitlab-org/security-products/analyzers/flawfinder),\na C/C++ analyzer with a corresponding Semgrep rule-set. This new testing\nframework leverages the power of GitLab CI/CD.\n\n\n## Preliminaries\n\n\n### GitLab and the Google Summer Of Code (GSoC)\n\n\nThe Google Summer of Code (GSoC) is a 10-week program that enlists student\ninterns to work on an open source project in collaboration with open source\norganizations. For GSoC 2021, GitLab offered [4 GSoC projects to the GSoC\ninterns](/blog/gsoc-at-gitlab/). The [interns completed each\nproject](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/gitlab-gsoc-2021/-/issues)\nunder the guidance of a GitLab team member who serves as their mentor and\nprovides regular feedback and assistance when needed.\n\n\n**[Read reflections from the Google Summer of Code interns about [what it\nwas like working with GitLab](/blog/gsoc-at-gitlab/)]**\n\n\n### About Semgrep\n\n\n[Semgrep](https://semgrep.dev/) is a language-agnostic static-analysis\n(SAST) tool that is powered by\n[tree-sitter](https://tree-sitter.github.io/tree-sitter/). 
Tree-sitter is a\nrobust parser-generator tool that supports parsing a variety of languages.\n\n\nSemgrep supports a\n[rule-syntax](https://semgrep.dev/docs/writing-rules/rule-syntax/) which can\nbe used to formulate detection rules in a configuration-as-code YAML format.\nA Semgrep rule determines the findings that Semgrep is supposed to detect.\nThese rules are combined together to create a rule-set.\n\n\n### About GitLab SAST\n\n\nGitLab is a complete DevSecOps platform and integrates a [variety of static\nanalysis\ntools](https://docs.gitlab.com/ee/user/application_security/sast/analyzers.html)\nthat help developers find vulnerabilities as early as possible in the\nsoftware development lifecycle (SDLC).\n\n\nSince all the integrated SAST tools are very different in terms of\nimplementation as well as tech stack they depend on, the SAST tools are all\nwrapped in Docker images. The wrappers translate the native vulnerability\nreports to a [generic, common report\nformat](https://docs.gitlab.com/ee/user/application_security/sast/) which is\nmade available by means of the `gl-sast-report.json` artifact. This generic\nreport is GitLab's common interface between analyzers and the GitLab Rails\nbackend.\n\n\n## Write vulnerability detection rules\n\n\n### Some background on our SAST tools\n\n\nOver time, the growing number of integrated SAST tools has become a\nmaintenance burden for GitLab due to two major contributing factors.\n\n\n1. **Integration cost**: All SAST tools have different release cycles – new\nreleases have to be pulled in immediately so that our users can benefit from\nthem. Given the large amount of integrated SAST tools, the time spent to\nmonitor the SAST tools for new releases, integrating and testing them is\nexpensive in terms of engineering effort/time.\n\n\n1. **Inflexibility**: Adapting or modifying SAST tools behavior is\nnon-trivial because each tool is based on different technologies. 
Also,\nupstream contributions to the original analyzer repositories are not\nguaranteed to be included by the maintainers. In these cases, they require\nus to fork a project which is not a scalable solution with regards to\nmaintenance effort.\n\n\nGitLab is in the process of replacing various SAST tools with a single,\nlanguage-agnostic SAST tool, called Semgrep, to fix these problems. Semgrep\ncan be configured by means of rules that are used to define what Semgrep is\nsupposed to find. These rules are provided as YAML configuration files so it\nis fairly easy to modify the behavior of Semgrep to different use cases.\n\nSemgrep's configuration-as-code approach paired with its language support\nenables us to replace multiple analyzers, which effectively reduces the\nmaintenance burden.\n\n\nHowever, the SAST tool replacement itself is a challenging process. For the\nmajority of use cases we have to assume that there is already a large amount\nof historic vulnerability data recorded and acted upon using [GitLab's\nvulnerability management\nfeatures](https://handbook.gitlab.com/handbook/security/threat-management/vulnerability-management/).\nUsers may also have grown accustomed to working with certain analyzers and\nmay even have a certain level of expectation with regards to the findings\nproduced by the analyzer.\n\n\nA smooth transition from a language-specific analyzer to a corresponding\nSemgrep rule-set must be guaranteed by meeting a certain level of quality\nassurance. A rule-set should be at least as good as the results produced by\nthe original analyzers, also known as parity. In turn, parity required we\nbuild test-suites to be used to measure the gap (in terms of rule coverage)\nbetween the original analyzer and the rule-set that is to replace it. 
A good\nquality rule-set is expected to perform at least as well as the SAST tool it\naims to replace (zero gap, full parity).\n\n\nThere are cases where the original SAST tool may falsely report\nvulnerabilities. In these situations, we aim to improve our rule-set in a\ncontrolled manner by explicitly documenting our improvements. However,\nbefore improving a rule-set, we want to start from a position of complete\nparity so that we have a holistic view of the impact incurred by single rule\nimprovements. This documentation of applied improvements is important so we\ncan justify changes with regard to reported findings to the customer.\n\n\nThere are three challenges we tried to address with this project:\n\n\n1. **Rule management**: Provide a central rule repository to store,\ndistribute and track changes applied to rules as well as test-cases.\n\n1. **Rule testing**: Every change applied to a rule in the rule repository\ntriggers an automated gap-analysis that measures the quality of the rules in\ncomparison to the original analyzers.\n\n1. **Analyzer replacement**: Replace at least one SAST tool (in our case\nflawfinder) with a corresponding rule-set – use the testing framework to\nensure that the rule-set is on par with the original SAST tool.\n\n\nWe unpack each of these challenges in the next section.\n\n\n### How we approached these challenges\n\n\nThe architecture of the rule-testing framework is depicted in the code\nsnippets below. All the Semgrep rules and the corresponding test-cases are\nstored in a central rule repository. 
Changes that are applied to the rules\ntrigger the execution of our rule testing framework that uses the rules and\ntest-cases to perform an automated gap analysis.\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n  crr[GitLab Rule Repository]\n\n  bandit[\"GitLab bandit\"]\n  bx[\"gl-sast-report.json\"]\n  sbx[\"gl-sast-report.json\"]\n  breport[\"bandit gap analysis report\"]\n\n  subgraph bandit_comparison[\"bandit comparison\"]\n    banditsemgrep[\"GitLab Semgrep\"]\n    banditcompare[\"compare\"]\n    bandit --> |run analyzer on test-cases| bx\n    banditsemgrep --> |run analyzer on test-cases| sbx\n    bx --> banditcompare\n    sbx --> banditcompare\n  end\n  crr -->|bandit rules + rule id mappings| banditsemgrep\n  banditcompare --> breport\n\n  fx[\"gl-sast-report.json\"]\n  fbx[\"gl-sast-report.json\"]\n  freport[\"flawfinder gap analysis report\"]\n  flawfinder[\"GitLab flawfinder\"]\n\n  subgraph flawfinder_comparison[\"flawfinder comparison\"]\n    flawfindersemgrep[\"GitLab Semgrep\"]\n    flawfindercompare[\"compare\"]\n    flawfinder --> |run analyzer on test-cases| fx\n    flawfindersemgrep --> |run analyzer on test-cases| fbx\n    fx --> flawfindercompare\n    fbx --> flawfindercompare\n  end\n  crr -->|flawfinder rules + rule id mappings| flawfindersemgrep\n  flawfindercompare --> freport\n\n\u003C/pre>\n\n\nThe rule testing framework is a compass that guides us through the rule\ndevelopment process by automatically measuring the efficacy of rules that\nare stored in the central rule (git) repository. This measurement happens\nduring a comparison step that validates the findings reported by the\noriginal analyzer against the corresponding Semgrep rule-set. For the\ncomparisons we cross-validate the SAST\n\nreports\n([`gl-sast-report.json`](https://docs.gitlab.com/ee/user/application_security/sast/))\nthat adhere to the GitLab security report format. 
Since the main goal is to\nachieve parity between the original analyzer and our corresponding Semgrep\nrules, we treat the original analyzer as the baseline. The code snippet\nabove depicts two example comparison steps for bandit and flawfinder.  The\ngap analysis is explained in more detail in the \"rule testing\" section\nbelow.\n\n\nUsing a central rule git repository allows us to manage and easily track\nchanges that are applied to rules and their corresponding test-cases in a\ncentral location. By means of GitLab CI/CD, we have a mechanism to\nautomatically run tests that enforce constraints and guidelines on the rules\nand test-cases. Upon rule changes, we automatically trigger the rule-testing\nframework which enables us to spot gaps in our rules instantly. The\nstructure of the central rule repository is detailed in the \"rule\nmanagement\" section below.\n\n\n#### How we addressed rule management challenges\n\n\nThe central rule repository is used to store, keep track of changes applied\nto `rules/test-cases` for a variety of different languages. By having a\nseparate rule repository we can add CI jobs to test, verify, and enforce\nsyntax guidelines.\n\n\nThe structure we use for the central rule repository is depicted below and\nfollows the structure: `\u003Clanguage>/\u003Cruleclass>/{rule-\u003Crulename>.yml,\ntest-\u003Crulename>.*}` where language denotes the target programming language,\n`\u003Cruleclass>` is a descriptive name for the class of issues the rule aims to\ndetect and `\u003Crulename>` is a descriptive name for the actual rule.  
We can\nhave multiple test cases per rule (all prefixed with `test-`) and rule files\n`rule-\u003Crulename>.yml` that are prefixed with `rule-` – a rule file contains\na single Semgrep rule.\n\n\n``` bash\n\n.\n\n├── mappings\n\n│   └── analyzer.yml\n\n├── c\n\n│   ├── buffer\n\n│   │   ├── rule-strcpy.yml\n\n│   │   ├── test-strcpy.c\n\n│   │   ├── rule-memcpy.yml\n\n│   │   └── test-memcpy.c\n\n│   └── ...\n\n└── javascript\n\n│   └── ...\n\n└── python\n\n│    ├── assert\n\n│    │   ├── rule-assert.yml\n\n│    │   └── test-assert.py\n\n│    └── exec\n\n│    │   ├── rule-exec.yml\n\n│    │   ├── test-exec.yml\n\n│    │   ├── rule-something.yml\n\n│    │   └── test-something.yml\n\n│    └── permission\n\n│    │   ├── rule-chmod.yml\n\n│    │   └── test-chmod.py\n\n│    └── ...\n\n└── ...\n\n```\n\n\nIn addition to the rules, we also store mapping files (in the `mappings`\nsubdirectory). The mappings directory in this repository contains YAML\nconfiguration/mapping files that map native analyzer IDs to the\ncorresponding Semgrep rules. An analyzer ID uniquely identifies the type of\nfinding. The information in the mapping files helps us to correlate the\nfinding from the original analyzer with their corresponding Semgrep findings\nand vice versa.\n\n\nThe mapping files are digested by the testing framework to perform an\nautomated gap analysis. The goal of this analysis is to check if there is an\nunexpected deviation between Semgrep (with the rules in this repository) and\na given analyzer.\n\n\nA mapping file groups distinct rules into rule-sets and, thus, can be used\nto bundle different rules based on a certain domain. 
An excerpt from a\nmapping file is depicted below – it maps bandit rules (identified by bandit\nIDs) to Semgrep rules from the central rule repository.\n\n\n``` yaml\n\nbandit:\n  - id: \"B101\"\n    rules:\n      - \"python/assert/rule-assert_used\"\n  - id: \"B102\"\n    rules:\n      - \"python/exec/rule-exec_used\"\n  - id: \"B103\"\n    rules:\n      - \"python/file_permissions/rule-general_bad_permission\"\n  - id: \"B104\"\n    rules:\n      - \"python/bind_all_interfaces/rule-general_bindall_interfaces\"\n```\n\n\n#### How the rule testing framework works\n\n\nThe test-oracle/baseline is provided by the original analyzer when executed\non the test-files. The rules in the central rule repository are compared and\nevaluated against this baseline. The execution of the testing framework is\ntriggered by any change applied to the rule repository.\n\n\nWe run all analyzers (flawfinder, bandit, etc.) and their corresponding\nSemgrep rule-sets (as defined by the mapping files) on the test-files from\nthe GitLab rule repository. The resulting `gl-sast-reports.json` reports\nthat are produced by the original analyzer and by the Semgrep analyzer are\nthen compared in a pairwise manner. To identify identical findings in both\nreports, we leverage the information from the mapping files that maps the\nrule-ids of the baseline analyzer to the corresponding Semgrep rule-ids for\nthe rules stored in the central rule repository.\n\n\nAs output, we produce a gap analysis report (in markdown format). The gap\nanalysis lists all the findings that have been reported by the original\nanalyzers and groups them into different tables (based on the native\nrule-ids). 
The screenshot below shows a single table from the gap analysis\nreport.\n\n\n![Gap Analysis\nReport](https://about.gitlab.com/images/blogimages/testing-framework-report.png){:\n.shadow.center}\n\nAn example table from the gap analysis report.\n\n{: .note.text-center}\n\n\nThe `X` symbols indicate whether the analyzers (in the example, flawfinder\nand Semgrep) were able to detect a given finding. The concrete findings as\nwell as the rule files are linked in the table. To reach full coverage,\nflawfinder as well as Semgrep have to cover the same findings for all the\nrules that are reported by the baseline.\n\n\n#### The analyzer replacement\n\n\nTo build a Semgrep rule-set that is on par with the capabilities of the\noriginal/baseline analyzer we leveraged the newly created testing framework.\nFlawfinder, a C/C++ analyzer, was the first analyzer we fully migrated to\nSemgrep using the testing framework as a compass.\n\n\nFirst, we checked the flawfinder implementation to identify the implemented\nrules. Given that flawfinder is a Python script and that the rules are\nessentially stored in a dictionary/hash data-structure, we were able to\nsemi-automatically extract the rules and generate the corresponding Semgrep\nrule files. We were also able to source the test-files from the flawfinder\nsource code repository.\n\n\nAfter the initial import of the first set of rules-files and test-cases, we\nused the information provided by the testing-framework to see which rules\nneeded refinement.\n\n\nWe responded to the information provided by our testing framework in the\nfollowing way:\n\n\n1. Findings covered by Baseline and covered by our rule-set: Nothing to be\ndone.\n\n1. Findings covered by Baseline but not covered by our rule-set: This\ndenotes an incomplete ruleset. In this case we extended the rule-file by\nproviding additional `pattern` entries.\n\n1. 
Findings not covered by Baseline but covered by our rule-set: This\nusually denotes that some rules are too vaguely formulated. In this case, we\nrefined our rules by using exclusions, e.g., by using `pattern-not` or by\nadding more detail to an already existing pattern.\n\n\nThe rule design was an iterative process where we closed the gaps between\nour semgrep rule-set and the flawfinder baseline in an iterative manner\nusing the testing framework as an oracle to ultimately achieve 100% parity.\n\n\n## How the GSoC project helped GitLab\n\n\nIn this GSoC project we successfully built an automated rule/configuration\ntesting framework that is driven by GitLab CI/CD capabilities and that\nprovided the data we needed to replace flawfinder reliably and quickly with\na corresponding Semgrep rule-set.\n\n\nIf you are interested in finding out more information about this GSoC\nproject, please check out the following repositories:\n\n\n- [Central Rule\nRepository](https://gitlab.com/gitlab-org/secure/gsoc-sast-vulnerability-rules/playground/sast-rules)\n\n- [Testing\nFramework](https://gitlab.com/gitlab-org/secure/gsoc-sast-vulnerability-rules/rule-testing-framework/rule-testing)\n\n- [Gap Analysis Computation\nTool](https://gitlab.com/gitlab-org/secure/gsoc-sast-vulnerability-rules/rule-testing-framework/report-diff)\n\n- [Repository to track gap\nstatistics](https://gitlab.com/gitlab-org/secure/gsoc-sast-vulnerability-rules/rule-testing-framework/rule-testing-stats)\n",[929,9,974],{"slug":1317,"featured":6,"template":703},"write-vulnerability-detection-rules","content:en-us:blog:write-vulnerability-detection-rules.yml","Write Vulnerability Detection 
Rules","en-us/blog/write-vulnerability-detection-rules.yml","en-us/blog/write-vulnerability-detection-rules",{"_path":1323,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1324,"content":1330,"config":1337,"_id":1339,"_type":14,"title":1340,"_source":16,"_file":1341,"_stem":1342,"_extension":19},"/en-us/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks",{"title":1325,"description":1326,"ogTitle":1325,"ogDescription":1326,"noIndex":6,"ogImage":1327,"ogUrl":1328,"ogSiteName":686,"ogType":687,"canonicalUrls":1328,"schema":1329},"Setting up 100 AWS Graviton Spot Runners for GitLab","Utilizing the GitLab HA Scaling Runner Vending Machine for AWS Automation to setup 100 GitLab runners on AWS Spot.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669882/Blog/Hero%20Images/hundredgitlabspotrunner.png","https://about.gitlab.com/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to provision 100 AWS Graviton GitLab Spot Runners in 10 Minutes for $2/hour\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"},{\"@type\":\"Person\",\"name\":\"Nupur Sharma\"}],\n        \"datePublished\": \"2021-08-17\",\n      }",{"title":1331,"description":1326,"authors":1332,"heroImage":1327,"date":1334,"body":1335,"category":721,"tags":1336},"How to provision 100 AWS Graviton GitLab Spot Runners in 10 Minutes for $2/hour",[718,1333],"Nupur Sharma","2021-08-17","Managing elastically scaled or highly available compute infrastructures is\none of the key challenges the cloud was built for. Application scaling\nconcerns can be handled by cloud services that are purpose designed,\nrigorously tested, and continually improved. This article dives into some\nspecific enablement automation that brings the benefits of AWS Autoscaling\nGroups (ASG) to runner management. 
There are benefits to both the largest\nfleets and single instance runners.\n\n\nEmbedded in this article is a YouTube video that demonstrates the deployment\nof 100 GitLab runners on Amazon EC2 Spot compute in less than 10 minutes\nusing less than 10 clicks. The video also shows updating this entire fleet\nin under 10 minutes to emphasize the time savings of built-in maintenace.\n\n\nThe information and automation in this article applies to GitLab Private\nRunners which are deployed on your own compute resources. Self-managed\nGitLab instances require private runners, but they can also be configured\nand used with GitLab.com SaaS accounts.\n\n\n## Well-architected runner management\n\n\nThere are many different reasons that a customer might need to deploy\nmultiple runners with various characteristics. Some of the more popular ones\nare:\n\n\n- Workloads that require large-scale runner fleets.\n\n- To gain cost savings through Spot compute, uptime scheduling, and ARM\narchitecture.\n\n- Projects with high demand of CI activity to make sure that the runner is\nnot being held up by jobs on another project.\n\n- Jobs that have special security requirements, e.g., security credentials,\nrole-based access or managed identities for Continuous Delivery (CD). These\nsecurity requirements can enable instance-level (AWS IAM Instance Profile)\nsecurity by allowing runners with sufficient rights to deploy in specific\ntarget environments. For example, a CD runner for non-production\nenvironments and a different runner for production.\n\n- Implementing role-based access control rather than user-based. 
This means\nusers don't have to use secrets to manage security requirements for CI jobs\nto accomplish their tasks.\n\n- Development teams can be confident the runner has the same capabilities\nfor CI and CD automation they test through their interactive logins by\nleveraging a common IAM role.\n\n\n### The challenges of building production-grade elastic GitLab Runners\n\n\n[The GitLab Runner](https://docs.gitlab.com/runner/) is the workhorse of\nGitLab CI and CD capabilities. The runner can handle numerous operating\nenvironments and automation functions for a GitLab instance. The GitLab\nRunner has become very sophisticated due to the broad range of supported\nenvironments. In order to successfully configure the GitLab Runner as a\nset-it-and-forget-it service, the user has to work through many different\ndecisions and considerations. We summarize some of the GitLab\nRunner-specific considerations that can be challenging:\n\n\n- There are a lot of configuration options and scenarios to sort through. It\ncan be an iterative process to discover what needs to be done to set up\nGitLab Runners.\n\n- Ensuring runners are a production-grade capability requires Infrastructure\nas Code (IaC) development so that high availability and scaling can be\nachieved by automatically spawning new instances.\n\n- Ensuring that runner deregistration happens correctly when GitLab Runners\nare automatically scaled in.\n\n- Additional cost-saving configurations, such as Spot compute and scheduled\nrunner uptime, can complicate the automation requirements for AWS\nAutoscaling Groups (ASGs).\n\n- Large organizations often want developers to be able to easily\nself-service deploy runners with various configurations. Service Management\nAutomation (SMA) has been made popular with products like Service Now, AWS\nService Catalog, and AWS Control Tower. 
This automation is compatible with\nSMA.\n\n- It can be difficult to map runners to AWS and map AWS to runners in large\norganizations with numerous runners and AWS accounts.\n\n\n### Introducing the GitLab HA Scaling Runner Vending Machine for AWS\n\n\nAn effective way to handle multiple design considerations is to make a\nreusable tool. To help you with best practice runner deployments on AWS, we\ncreated the [GitLab HA Scaling Runner Vending Machine for\nAWS](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/)\n(\"The GitLab Runner Vending Machine\"). It is created in AWS’ Infrastructure\nas Code, known as CloudFormation.\n\n\n> **Designed with AWS Well Architected:** This automation has many features\nbeyond the scope of this blog post. The primary focus of this blog post is\non managing costs. See the [full list of features\nhere](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/-/blob/main/FEATURES.md).\n\n\nThe GitLab Runner Vending Machine has the following cost management and\nscaling management benefits, exposed as a variety of parameters:\n\n\n- The ability to leverage Spot compute instances. This is important because\nit leaves CI/CD pipeline developers in charge of whether specific Gitlab\nCI/CD jobs run on Spot compute or not.\n\n- ASG-scheduled scaling so that a runner or runner fleet can be completely\nshutdown when not in use.\n\n- The GitLab Runner Vending Machine can leverage ARM compute for Linux -\nwhich runs faster and costs less.\n\n- It can also use ASG to update all runners in a fleet with the latest\nmachine images and GitLab Runner version (or a specific version). 
When\nmaintenance is not built-in, the labor cost of keeping things up-to-date can\nbe significant.\n\n- Runner naming and tagging in AWS and GitLab, which eases the burden of\nlocating runner instances and managing orphaned runners registrations,\nwhether it is manual or automated.\n\n\n### How to save money with The GitLab Runner Vending Machine\n\n\nSignificant savings are possible with this IaC, whether your team wants to\nsave on a single runner or a fleet of them.\n\n\nThe savings calculations below are for a single runner and should be linear\nfor a given workload. To calculate your savings for more runners, simply\nmultiply the final result by the number of runner instances. The available\n\"Runner Minutes\" per hour is calculated as the runner's job concurrency\nsetting multiplied by the minutes in an hour. For this exercise, we'll use\njob concurrency of \"10\". This number should be changed depending on the\ninstance types you are using and the load testing of your typical CI/CD\nworkloads.\n\n\nJust like most performance analysis, we are assuming that hardware resource\nutilization is optimal and consistent. If a runner cluster can sustain\nrespectable performance with 80% CPU loading, this calculation assumes that\nwould be maintained regardless of the size of the cluster.\n\n\n#### AWS Graviton ARM and Spot savings\n\n\nThe GitLab Runner engineering team has completed performance testing that\ndemonstrates performance gains of more than 30% on some AWS Graviton\n(ARM-based) instance types. Assuming that runners are performance-managed\nfor optimized utilization, this gain is a direct cost savings. 
Just\nrecently, we shared [how deploying GitLab on Arm-based AWS Graviton2\nresulted in cost savings of 23% and 36% performance\ngains](/blog/achieving-23-cost-savings-and-36-performance-gain-using-gitlab-and-gitlab-runner-on-arm-neoverse-based-aws-graviton2-processor/).\n\n\n![ARM Efficiency Test Results For GitLab\nRunner](https://about.gitlab.com/images/blogimages/hundred-runners/hundredrunners-image1.png)\n\nGitLab Runner testing results for ARM-efficiency gains.\n\n{: .note.text-center}\n\n\n#### Scheduling savings\n\n\nThe savings can be dramatic when teams are able to turn off runners when not\nin use. For instance: Scheduling a runner to operate for 40-hours per week\nsaves 76% when compared to the cost of running it for 168 hours. Runners\nthat are just in use for 10 hours per week saves 94%.\n\n\n#### Combining scheduling, Spot, and ARM to save 97%\n\n\nJust for fun, let's see what savings are possible by comparing a standard\nrunner scenario with deploying runners in customized, stand-alone instances\nto the maximum savings automation can deliver.\n\n\nImagine I am a developer who set up a custom GitLab Runner on an m5.xlarge\ninstance, which is x86 the architecture, for a development team that works\nfor 40 hours on the same time zone. Since there is no automation, the GitLab\nRunner runs 24/7. We will assume a job concurrency of 10, which gives 600\n\"runner minutes\" per hour of run time. 
Scheduling uptime, running on Spot,\nand leveraging ARM can all be achieved quickly by redeploying the runner\nwith The GitLab Runner Vending Machine.\n\n\nHere is the calculation to run the configuration described above, for one\nweek: On Demand, x86, Always On: 1 x m5.xlarge = .192/hr x 168 hrs/week =\n**$32/week or $1664/year**\n\n\nHere are the savings that come from running Spot, ARM, and scheduling the\nRunner to be up just 40hrs/week: 1 x m6g.large Spot = .0419 x 40hrs/week x\n64% (36% better performance) = **$1/week**\n\n\n$1/$32 x 100 = 3.125% of the original cost for the same work. In other\nwords, **we just saved 97%** without ever impacting the ability to get the\njob done.\n\n\nIn short, The GitLab Runner Vending Machine intends to bring the many cost\nsaving mechanisms of AWS Cloud computing to your GitLab Runner fleets.\n\n\nYou can save costs by using ARM/Graviton instances, Spot compute, or by\nscheduling uptime. In many cases, you can combine all three savings\nmechanisms for maximum impact.\n\n\n### Special pipeline building concerns for Spot Runners\n\n\nSpot instances can disappear with as little as two minutes of warning. This\ninevitably means some runners will be terminated while jobs are still in\nprogress. CI/CD pipeline developers must take into account whether a job\nought to run on compute resources that can disappear with short notice (so\nshort as to be considered \"no notice\"). 
This comes down to deciding what\njobs are OK to run on Spot and what jobs should instead run on AWS'\npersistent compute known as \"On-Demand\".\n\n\nThe GitLab Runner Vending Machine accounts for these constraints by tagging\nrunner instances in GitLab with `computetype-spot` or `computetype-ondemand`\n– indicating in the \"tags\" segment of GitLab CI/CD jobs if a job should run\non Spot compute.\n\n\nSome types of CI workloads, e.g., mass performance testing or large unit\ntesting suites, may already have work queues and work tracking that make it\nideal for Spot compute. Other activities, e.g., polling another system for a\ndeployment status, could suffer a material discrepancy if terminated\npermaturely. Others, such as building the application, are sort of in the\nmiddle. Usually, restarting the build is sufficient.\n\n\n### Job configuration for Spot\n\n\nIf you need to reschedule terminated work, it is helpful to configure\nGitLab’s job `retry:` keyword. When working with a dispatching engine or\nwork queue that automatically accounts for incompleted work by processing\nagents, the retry configuration is unnecessary.\n\n\nHere is an example that implements both of these concepts:\n\n\n```\n\nmy-scaled-test-suite:\n  parallel: 100\n  tags:\n  - computetype-Spot\n  retry:\n    max: 2\n    when:\n      - runner_system_failure\n      - unknown_failure\n```\n\n\nThe usage and limitations of `retry:` are discussed in greater detail in the\n[GitLab CI documentation on\nretry](https://docs.gitlab.com/ee/ci/yaml/#retry).\n\n\n### How to get started\n\n\nThe CloudFormation templates for the [GitLab Runner Vending Machine are\nmanaged in a public project on\nGitLab.com](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/).\nThere is a lot of information in the project about how the solution works\nand what problems it aims to solve, and will be useful for very experienced\nAWS builders.\n\n\nBut to keep it simple for users who want the 
quickest path to creating\nrunners of all sizes, it also has an \"easy button\" page that has a table\nthat looks like this:\n\n\n![Easy Button Page\nSample](https://about.gitlab.com/images/blogimages/hundred-runners/hundredrunners-image2.png)\n\nThe easy buttons launch a CloudFormation Quick Create that only requires\nfilling in a few fields.\n\n{: .note.text-center}\n\n\nKeep in mind that easy buttons intentionally hide the high degree of\ncustomization that is possible with this automation by setting the\nparameters for the most common scenarios in advance. Advanced AWS users\nshould read more of the documentation in the repository to understand that\nthe GitLab Runner Vending Machine is also capable of creating sophisticated\nrunner fleets.\n\n\nFirst, click the CloudFormation icons to launch the Easy Button template\ndirectly into the CloudFormation Quick Create console. The Quick Create\nconsole is designed for simplicity to enable you to complete the prompts and\nthen click one button to launch the stack.\n\n\n![CloudFormation Quick Create\nExample](https://about.gitlab.com/images/blogimages/hundred-runners/hundredrunners-image3.png){:\n.shadow.medium.center}\n\nThis is a typical Quick Create form for the GitLab Vending Machine easy\nbuttons.\n\n{: .note.text-center}\n\n\nNext, select the deploy region by using the drop down menu in the upper\nright of the console (where the screenshot says \"Oregon\").\n\n\nIn most cases, you will only need to add your GitLab instance URL\n(GitLab.com is fine if that is where your repositories are), and the runner\ntoken, which you retrieve from the group level or project you wish to attach\nthe runners to. If you are registering against a self-managed instance, you\ncan use the instance-level tokens from the administrator console to register\nthe runner for use across the entire instance. 
Read on for [instructions for\nfinding Runner Registration\nTokens](https://docs.gitlab.com/runner/register/#requirements).\n\n\nA few other customization parameters are available for your convenience.\n\n\nNote that the automation attempts to use the default VPC of the region in\nwhich you deploy and the default security group for the VPC. In some\norganizations, default VPCs and/or their security groups are locked. You can\ndeploy to custom VPCs by using the full template instead of an easy button.\nOn the easy button page look for the footnote \"Not any easy button person?\"\"\nto find a link to the full template.\n\n\nWatch the video below to see the deployment of provisioning 100 GitLab Spot\nRunners on AWS in less than 10 minutes and in less than 10 clicks for just\n$5 per hour.\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/EW4RJv5zW4U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\nCheck out the YouTube playlist for more relevant videos about [GitLab and\nAWS](https://youtube.com/playlist?list=PL05JrBw4t0Ko30Bkf8bAvR-8E441Fy2G9)\n\n\n### This automation does much, much more\n\n\nWhile this article focused how much you can saving while using Spot for\nscaled runners, the underlying automation is capable of many other\nscenarios. 
Below is a summary of the additional features and benefits\ncovered in the documentation.\n\n\n- Scaled runners that are persistent (not Spot) ([see more easy buttons\nhere](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/-/blob/main/easybuttons.md)).\n\n- Supports small, single runner setups and scaled ones.\n\n- Supports GitLab.com SaaS or self-managed instances.\n\n- Automates OS patching and Runner version upgrading.\n\n- Supports Windows and Linux.\n\n- Can be reused with Amazon provisioning services such as Service Catalog\nand Control Tower.\n\n- Implements least privilege security throughout.\n\n- Supports deregistering runners on scale-in or Spot termination.\n\n\nA full feature list is in the document [Features of GitLab HA Scaling Runner\nVending Machine for\nAWS](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/-/blob/main/FEATURES.md)\n\n\n### Easy running\n\n\nWe hope that this automation will make deployment of runners of all sizes\nsimple for you. We are open to your feedback, suggestions and contributions\nin the GitLab project.\n",[745,746,723,9],{"slug":1338,"featured":6,"template":703},"100-runners-in-less-than-10mins-and-less-than-10-clicks","content:en-us:blog:100-runners-in-less-than-10mins-and-less-than-10-clicks.yml","100 Runners In Less Than 10mins And Less Than 10 Clicks","en-us/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks.yml","en-us/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks",4,[679,708,730,753,774,794,817,837,857],1758326238089]