[{"data":1,"prerenderedAt":3394},["ShallowReactive",2],{"/en-us/blog/tags/ai-ml/":3,"navigation-en-us":20,"banner-en-us":450,"footer-en-us":467,"AI/ML-tag-page-en-us":677},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":11,"_id":13,"_type":14,"title":15,"_source":16,"_file":17,"_stem":18,"_extension":19},"/en-us/blog/tags/ai-ml","tags",false,"",{"tag":9,"tagSlug":10},"AI/ML","ai-ml",{"template":12},"BlogTag","content:en-us:blog:tags:ai-ml.yml","yaml","Ai Ml","content","en-us/blog/tags/ai-ml.yml","en-us/blog/tags/ai-ml","yml",{"_path":21,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":446,"_type":14,"title":447,"_source":16,"_file":448,"_stem":449,"_extension":19},"/shared/en-us/main-navigation","en-us",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":377,"minimal":408,"duo":427,"pricingDeployment":436},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,187,192,298,358],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":60,"config":61},"Meet GitLab Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo 
ai",{"title":65,"description":66,"link":67},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":169},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,124,148],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[108,112,116,120],{"text":109,"config":110},"CI/CD",{"href":111,"dataGaLocation":28,"dataGaName":109},"/solutions/continuous-integration/",{"text":113,"config":114},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":115},"AI assisted development",{"text":117,"config":118},"Source Code Management",{"href":119,"dataGaLocation":28,"dataGaName":117},"/solutions/source-code-management/",{"text":121,"config":122},"Automated Software Delivery",{"href":105,"dataGaLocation":28,"dataGaName":123},"Automated software delivery",{"title":125,"description":126,"link":127,"items":132},"Security","Deliver code faster without compromising security",{"config":128},{"href":129,"dataGaName":130,"dataGaLocation":28,"icon":131},"/solutions/security-compliance/","security and 
compliance","ShieldCheckLight",[133,138,143],{"text":134,"config":135},"Application Security Testing",{"href":136,"dataGaName":137,"dataGaLocation":28},"/solutions/application-security-testing/","Application security testing",{"text":139,"config":140},"Software Supply Chain Security",{"href":141,"dataGaLocation":28,"dataGaName":142},"/solutions/supply-chain/","Software supply chain security",{"text":144,"config":145},"Software Compliance",{"href":146,"dataGaName":147,"dataGaLocation":28},"/solutions/software-compliance/","software compliance",{"title":149,"link":150,"items":155},"Measurement",{"config":151},{"icon":152,"href":153,"dataGaName":154,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[156,160,164],{"text":157,"config":158},"Visibility & Measurement",{"href":153,"dataGaLocation":28,"dataGaName":159},"Visibility and Measurement",{"text":161,"config":162},"Value Stream Management",{"href":163,"dataGaLocation":28,"dataGaName":161},"/solutions/value-stream-management/",{"text":165,"config":166},"Analytics & Insights",{"href":167,"dataGaLocation":28,"dataGaName":168},"/solutions/analytics-and-insights/","Analytics and insights",{"title":170,"items":171},"GitLab for",[172,177,182],{"text":173,"config":174},"Enterprise",{"href":175,"dataGaLocation":28,"dataGaName":176},"/enterprise/","enterprise",{"text":178,"config":179},"Small Business",{"href":180,"dataGaLocation":28,"dataGaName":181},"/small-business/","small business",{"text":183,"config":184},"Public Sector",{"href":185,"dataGaLocation":28,"dataGaName":186},"/solutions/public-sector/","public sector",{"text":188,"config":189},"Pricing",{"href":190,"dataGaName":191,"dataGaLocation":28,"dataNavLevelOne":191},"/pricing/","pricing",{"text":193,"config":194,"link":196,"lists":200,"feature":285},"Resources",{"dataNavLevelOne":195},"resources",{"text":197,"config":198},"View all 
resources",{"href":199,"dataGaName":195,"dataGaLocation":28},"/resources/",[201,234,257],{"title":202,"items":203},"Getting started",[204,209,214,219,224,229],{"text":205,"config":206},"Install",{"href":207,"dataGaName":208,"dataGaLocation":28},"/install/","install",{"text":210,"config":211},"Quick start guides",{"href":212,"dataGaName":213,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":215,"config":216},"Learn",{"href":217,"dataGaLocation":28,"dataGaName":218},"https://university.gitlab.com/","learn",{"text":220,"config":221},"Product documentation",{"href":222,"dataGaName":223,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":225,"config":226},"Best practice videos",{"href":227,"dataGaName":228,"dataGaLocation":28},"/getting-started-videos/","best practice videos",{"text":230,"config":231},"Integrations",{"href":232,"dataGaName":233,"dataGaLocation":28},"/integrations/","integrations",{"title":235,"items":236},"Discover",[237,242,247,252],{"text":238,"config":239},"Customer success stories",{"href":240,"dataGaName":241,"dataGaLocation":28},"/customers/","customer success stories",{"text":243,"config":244},"Blog",{"href":245,"dataGaName":246,"dataGaLocation":28},"/blog/","blog",{"text":248,"config":249},"Remote",{"href":250,"dataGaName":251,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":253,"config":254},"TeamOps",{"href":255,"dataGaName":256,"dataGaLocation":28},"/teamops/","teamops",{"title":258,"items":259},"Connect",[260,265,270,275,280],{"text":261,"config":262},"GitLab 
Services",{"href":263,"dataGaName":264,"dataGaLocation":28},"/services/","services",{"text":266,"config":267},"Community",{"href":268,"dataGaName":269,"dataGaLocation":28},"/community/","community",{"text":271,"config":272},"Forum",{"href":273,"dataGaName":274,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":276,"config":277},"Events",{"href":278,"dataGaName":279,"dataGaLocation":28},"/events/","events",{"text":281,"config":282},"Partners",{"href":283,"dataGaName":284,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":286,"textColor":287,"text":288,"image":289,"link":293},"#2f2a6b","#fff","Insights for the future of software development",{"altText":290,"config":291},"the source promo card",{"src":292},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758208064/dzl0dbift9xdizyelkk4.svg",{"text":294,"config":295},"Read the latest",{"href":296,"dataGaName":297,"dataGaLocation":28},"/the-source/","the source",{"text":299,"config":300,"lists":302},"Company",{"dataNavLevelOne":301},"company",[303],{"items":304},[305,310,316,318,323,328,333,338,343,348,353],{"text":306,"config":307},"About",{"href":308,"dataGaName":309,"dataGaLocation":28},"/company/","about",{"text":311,"config":312,"footerGa":315},"Jobs",{"href":313,"dataGaName":314,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":314},{"text":276,"config":317},{"href":278,"dataGaName":279,"dataGaLocation":28},{"text":319,"config":320},"Leadership",{"href":321,"dataGaName":322,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":324,"config":325},"Team",{"href":326,"dataGaName":327,"dataGaLocation":28},"/company/team/","team",{"text":329,"config":330},"Handbook",{"href":331,"dataGaName":332,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":334,"config":335},"Investor relations",{"href":336,"dataGaName":337,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":339,"config":340},"Trust 
Center",{"href":341,"dataGaName":342,"dataGaLocation":28},"/security/","trust center",{"text":344,"config":345},"AI Transparency Center",{"href":346,"dataGaName":347,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":349,"config":350},"Newsletter",{"href":351,"dataGaName":352,"dataGaLocation":28},"/company/contact/","newsletter",{"text":354,"config":355},"Press",{"href":356,"dataGaName":357,"dataGaLocation":28},"/press/","press",{"text":359,"config":360,"lists":361},"Contact us",{"dataNavLevelOne":301},[362],{"items":363},[364,367,372],{"text":35,"config":365},{"href":37,"dataGaName":366,"dataGaLocation":28},"talk to sales",{"text":368,"config":369},"Get help",{"href":370,"dataGaName":371,"dataGaLocation":28},"/support/","get help",{"text":373,"config":374},"Customer portal",{"href":375,"dataGaName":376,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":378,"login":379,"suggestions":386},"Close",{"text":380,"link":381},"To search repositories and projects, login to",{"text":382,"config":383},"gitlab.com",{"href":42,"dataGaName":384,"dataGaLocation":385},"search login","search",{"text":387,"default":388},"Suggestions",[389,391,395,397,401,405],{"text":57,"config":390},{"href":62,"dataGaName":57,"dataGaLocation":385},{"text":392,"config":393},"Code Suggestions (AI)",{"href":394,"dataGaName":392,"dataGaLocation":385},"/solutions/code-suggestions/",{"text":109,"config":396},{"href":111,"dataGaName":109,"dataGaLocation":385},{"text":398,"config":399},"GitLab on AWS",{"href":400,"dataGaName":398,"dataGaLocation":385},"/partners/technology-partners/aws/",{"text":402,"config":403},"GitLab on Google Cloud",{"href":404,"dataGaName":402,"dataGaLocation":385},"/partners/technology-partners/google-cloud-platform/",{"text":406,"config":407},"Why 
GitLab?",{"href":70,"dataGaName":406,"dataGaLocation":385},{"freeTrial":409,"mobileIcon":414,"desktopIcon":419,"secondaryButton":422},{"text":410,"config":411},"Start free trial",{"href":412,"dataGaName":33,"dataGaLocation":413},"https://gitlab.com/-/trials/new/","nav",{"altText":415,"config":416},"Gitlab Icon",{"src":417,"dataGaName":418,"dataGaLocation":413},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203874/jypbw1jx72aexsoohd7x.svg","gitlab icon",{"altText":415,"config":420},{"src":421,"dataGaName":418,"dataGaLocation":413},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203875/gs4c8p8opsgvflgkswz9.svg",{"text":423,"config":424},"Get Started",{"href":425,"dataGaName":426,"dataGaLocation":413},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":428,"mobileIcon":432,"desktopIcon":434},{"text":429,"config":430},"Learn more about GitLab Duo",{"href":62,"dataGaName":431,"dataGaLocation":413},"gitlab duo",{"altText":415,"config":433},{"src":417,"dataGaName":418,"dataGaLocation":413},{"altText":415,"config":435},{"src":421,"dataGaName":418,"dataGaLocation":413},{"freeTrial":437,"mobileIcon":442,"desktopIcon":444},{"text":438,"config":439},"Back to pricing",{"href":190,"dataGaName":440,"dataGaLocation":413,"icon":441},"back to pricing","GoBack",{"altText":415,"config":443},{"src":417,"dataGaName":418,"dataGaLocation":413},{"altText":415,"config":445},{"src":421,"dataGaName":418,"dataGaLocation":413},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":451,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"title":452,"button":453,"image":458,"config":462,"_id":464,"_type":14,"_source":16,"_file":465,"_stem":466,"_extension":19},"/shared/en-us/banner","is now in public beta!",{"text":454,"config":455},"Try the 
Beta",{"href":456,"dataGaName":457,"dataGaLocation":28},"/gitlab-duo/agent-platform/","duo banner",{"altText":459,"config":460},"GitLab Duo Agent Platform",{"src":461},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":463},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":468,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":469,"_id":673,"_type":14,"title":674,"_source":16,"_file":675,"_stem":676,"_extension":19},"/shared/en-us/main-footer",{"text":470,"source":471,"edit":477,"contribute":482,"config":487,"items":492,"minimal":665},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":472,"config":473},"View page source",{"href":474,"dataGaName":475,"dataGaLocation":476},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":478,"config":479},"Edit this page",{"href":480,"dataGaName":481,"dataGaLocation":476},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":483,"config":484},"Please contribute",{"href":485,"dataGaName":486,"dataGaLocation":476},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":488,"facebook":489,"youtube":490,"linkedin":491},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[493,516,572,601,635],{"title":46,"links":494,"subMenu":499},[495],{"text":496,"config":497},"DevSecOps platform",{"href":55,"dataGaName":498,"dataGaLocation":476},"devsecops platform",[500],{"title":188,"links":501},[502,506,511],{"text":503,"config":504},"View plans",{"href":190,"dataGaName":505,"dataGaLocation":476},"view plans",{"text":507,"config":508},"Why 
Premium?",{"href":509,"dataGaName":510,"dataGaLocation":476},"/pricing/premium/","why premium",{"text":512,"config":513},"Why Ultimate?",{"href":514,"dataGaName":515,"dataGaLocation":476},"/pricing/ultimate/","why ultimate",{"title":517,"links":518},"Solutions",[519,524,526,528,533,538,542,545,549,554,556,559,562,567],{"text":520,"config":521},"Digital transformation",{"href":522,"dataGaName":523,"dataGaLocation":476},"/topics/digital-transformation/","digital transformation",{"text":134,"config":525},{"href":136,"dataGaName":134,"dataGaLocation":476},{"text":123,"config":527},{"href":105,"dataGaName":106,"dataGaLocation":476},{"text":529,"config":530},"Agile development",{"href":531,"dataGaName":532,"dataGaLocation":476},"/solutions/agile-delivery/","agile delivery",{"text":534,"config":535},"Cloud transformation",{"href":536,"dataGaName":537,"dataGaLocation":476},"/topics/cloud-native/","cloud transformation",{"text":539,"config":540},"SCM",{"href":119,"dataGaName":541,"dataGaLocation":476},"source code management",{"text":109,"config":543},{"href":111,"dataGaName":544,"dataGaLocation":476},"continuous integration & delivery",{"text":546,"config":547},"Value stream management",{"href":163,"dataGaName":548,"dataGaLocation":476},"value stream management",{"text":550,"config":551},"GitOps",{"href":552,"dataGaName":553,"dataGaLocation":476},"/solutions/gitops/","gitops",{"text":173,"config":555},{"href":175,"dataGaName":176,"dataGaLocation":476},{"text":557,"config":558},"Small business",{"href":180,"dataGaName":181,"dataGaLocation":476},{"text":560,"config":561},"Public sector",{"href":185,"dataGaName":186,"dataGaLocation":476},{"text":563,"config":564},"Education",{"href":565,"dataGaName":566,"dataGaLocation":476},"/solutions/education/","education",{"text":568,"config":569},"Financial services",{"href":570,"dataGaName":571,"dataGaLocation":476},"/solutions/finance/","financial 
services",{"title":193,"links":573},[574,576,578,580,583,585,587,589,591,593,595,597,599],{"text":205,"config":575},{"href":207,"dataGaName":208,"dataGaLocation":476},{"text":210,"config":577},{"href":212,"dataGaName":213,"dataGaLocation":476},{"text":215,"config":579},{"href":217,"dataGaName":218,"dataGaLocation":476},{"text":220,"config":581},{"href":222,"dataGaName":582,"dataGaLocation":476},"docs",{"text":243,"config":584},{"href":245,"dataGaName":246,"dataGaLocation":476},{"text":238,"config":586},{"href":240,"dataGaName":241,"dataGaLocation":476},{"text":248,"config":588},{"href":250,"dataGaName":251,"dataGaLocation":476},{"text":261,"config":590},{"href":263,"dataGaName":264,"dataGaLocation":476},{"text":253,"config":592},{"href":255,"dataGaName":256,"dataGaLocation":476},{"text":266,"config":594},{"href":268,"dataGaName":269,"dataGaLocation":476},{"text":271,"config":596},{"href":273,"dataGaName":274,"dataGaLocation":476},{"text":276,"config":598},{"href":278,"dataGaName":279,"dataGaLocation":476},{"text":281,"config":600},{"href":283,"dataGaName":284,"dataGaLocation":476},{"title":299,"links":602},[603,605,607,609,611,613,615,619,624,626,628,630],{"text":306,"config":604},{"href":308,"dataGaName":301,"dataGaLocation":476},{"text":311,"config":606},{"href":313,"dataGaName":314,"dataGaLocation":476},{"text":319,"config":608},{"href":321,"dataGaName":322,"dataGaLocation":476},{"text":324,"config":610},{"href":326,"dataGaName":327,"dataGaLocation":476},{"text":329,"config":612},{"href":331,"dataGaName":332,"dataGaLocation":476},{"text":334,"config":614},{"href":336,"dataGaName":337,"dataGaLocation":476},{"text":616,"config":617},"Sustainability",{"href":618,"dataGaName":616,"dataGaLocation":476},"/sustainability/",{"text":620,"config":621},"Diversity, inclusion and belonging (DIB)",{"href":622,"dataGaName":623,"dataGaLocation":476},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":339,"config":625},{"href":341,"dataGaName":342,"dataGaLocation":476},{"text":349,"config":627},{"href":351,"dataGaName":352,"dataGaLocation":476},{"text":354,"config":629},{"href":356,"dataGaName":357,"dataGaLocation":476},{"text":631,"config":632},"Modern Slavery Transparency Statement",{"href":633,"dataGaName":634,"dataGaLocation":476},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":636,"links":637},"Contact Us",[638,641,643,645,650,655,660],{"text":639,"config":640},"Contact an expert",{"href":37,"dataGaName":38,"dataGaLocation":476},{"text":368,"config":642},{"href":370,"dataGaName":371,"dataGaLocation":476},{"text":373,"config":644},{"href":375,"dataGaName":376,"dataGaLocation":476},{"text":646,"config":647},"Status",{"href":648,"dataGaName":649,"dataGaLocation":476},"https://status.gitlab.com/","status",{"text":651,"config":652},"Terms of use",{"href":653,"dataGaName":654,"dataGaLocation":476},"/terms/","terms of use",{"text":656,"config":657},"Privacy statement",{"href":658,"dataGaName":659,"dataGaLocation":476},"/privacy/","privacy statement",{"text":661,"config":662},"Cookie preferences",{"dataGaName":663,"dataGaLocation":476,"id":664,"isOneTrustButton":91},"cookie preferences","ot-sdk-btn",{"items":666},[667,669,671],{"text":651,"config":668},{"href":653,"dataGaName":654,"dataGaLocation":476},{"text":656,"config":670},{"href":658,"dataGaName":659,"dataGaLocation":476},{"text":661,"config":672},{"dataGaName":663,"dataGaLocation":476,"id":664,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":678,"featuredPost":3373,"totalPagesCount":3392,"initialPosts":3393},[679,699,724,745,766,786,806,821,842,863,883,903,924,944,964,985,1004,1023,1044,1064,1092,1112,1132,1150,1168,1189,1209,1230,1250,1272,1291,1310,1329,1349,1369,1389,1410,1426,1446,1465,1484,1503,1521,1539,1560,1579,1598,1618,1639,1658,1676,1693,1712,1732,1750,1769,1788,1807,1825,1844,1863,1883,1903,1923,1942,1960,1980,1999,2021,2039,2058,2077,2097,2115,2134,2152,2171,2191,2212,2234,2253,2272,2291,2310,2330,2350,2370,2389,2409,2426,2443,2463,2484,2502,2521,2540,2558,2576,2593,2611,2629,2647,2666,2685,2704,2725,2744,2762,2780,2798,2817,2836,2854,2875,2895,2915,2931,2948,2967,2985,3004,3024,3043,3062,3081,3099,3117,3136,3154,3174,3194,3213,3232,3250,3268,3286,3305,3322,3340,3358],{"_path":680,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":681,"content":684,"config":692,"_id":695,"_type":14,"title":696,"_source":16,"_file":697,"_stem":698,"_extension":19},"/en-us/blog/3-best-practices-for-building-software-in-the-era-of-llms",{"noIndex":6,"title":682,"description":683},"3 best practices for building software in the era of LLMs","With AI transforming coding speed, developers need new security habits. Learn what they are and how to deploy them throughout the DevSecOps workflow.",{"title":682,"description":683,"authors":685,"heroImage":687,"body":688,"date":689,"category":10,"tags":690},[686],"Salman Ladha","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662523/Blog/Hero%20Images/Gartner_DevOps_Blog_Post_Cover_Image_1800x945__2_.png","AI has rapidly become a core part of modern software development. Not only is it helping developers code faster than ever, but it’s also automating low-level tasks like writing test cases or summarizing documentation. 
According to our [2024 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/), 81% of developers are already using AI in their workflows or plan to in the next two years.\n\nAs code is written with less manual effort, we’re seeing a subtle but important behavioral change: Developers are beginning to trust AI-generated code with less scrutiny. That confidence — understandable as it may be — can quietly introduce security risks, especially as the overall volume of code increases. Developers can’t be expected to stay on top of every vulnerability or exploit, which is why we need systems and safeguards that scale with them. AI tools are here to stay. So, as security professionals, it’s incumbent on you to empower developers to adopt them in a way that improves both speed and security.\n\nHere are three practical ways to do that. \n\n## Never trust, always verify\n\nAs mentioned above, developers are beginning to trust AI-generated code more readily, especially when it looks clean and compiles without error. To combat this, adopt a zero-trust mindset. While we often talk about [zero trust](https://about.gitlab.com/blog/why-devops-and-zero-trust-go-together/) in the context of identity and access management, the same principle can be applied here with a slightly different framing. Treat AI-generated code like input from a junior developer: helpful, but not production-ready without a proper review. \n\nA developer should be able to explain what the code is doing and why it’s safe before it gets merged. Reviewing AI-generated code might even shape up to be an emerging skillset required in the world of software development. The developers who excel at this will be indispensable because they’ll marry the speed of LLMs with the risk reduction mindset to produce secure code, faster. \n\nThis is where tools like [GitLab Duo Code Review](https://docs.gitlab.com/user/project/merge_requests/duo_in_merge_requests/) can help. 
As a feature of our AI companion across the software development lifecycle, it brings AI into the code review process, not to replace human judgment, but to enhance it. By surfacing questions, inconsistencies, and overlooked issues in the merge requests, AI can help developers keep up with the very AI that’s accelerating development cycles. \n\n## Prompt for secure patterns\n\nLarge language models ([LLMs](https://about.gitlab.com/blog/what-is-a-large-language-model-llm/))  are powerful, but only as precise as the prompts they’re given. That’s why prompt engineering is becoming a core part of working with AI tools. In the world of LLMs, your input *is* the interface. Developers who learn to write clear, security-aware prompts will play a key role in building safer software from the start.\n\nFor example, vague requests like “build a login form” often produce insecure or overly simplistic results. However, by including more context, such as “build a login form **with** input validation, rate limiting, and hashing, **and** support phishing-resistant authentication methods like passkeys,” you’re more likely to produce an output that meets the security standards of your organization. \n\nRecent [research](https://www.backslash.security/press-releases/backslash-security-reveals-in-new-research-that-gpt-4-1-other-popular-llms-generate-insecure-code-unless-explicitly-prompted) from Backlash Security backs this up. They found that secure prompting improved results across popular LLMs. When developers simply asked models to “write secure code,” success rates remained low. However, when prompts referenced [OWASP best practices](https://cheatsheetseries.owasp.org/cheatsheets/LLM_Prompt_Injection_Prevention_Cheat_Sheet.html), the rate of secure code generation increased. \n\nPrompt engineering should be part of how we train and empower security champions within development teams. 
Just like we teach secure coding patterns and threat modeling, we should also be teaching developers how to guide AI tools with the same security mindset. \n\n> Learn more with these helpful [prompt engineering tips](https://docs.gitlab.com/development/ai_features/prompt_engineering/).\n\n## Scan everything, no exceptions\n\nThe rise of AI means we’re writing more code, quicker, with the same number of humans. That shift should change how we think about security, not just as a final check, but as an always-on safeguard woven into every aspect of the development process.\n\nMore code means a wider attack surface. And when that code is partially or fully generated, we can’t solely rely on secure coding practices or individual intuition to spot risks. That’s where automated scanning comes in. [Static Application Security Testing (SAST)](https://docs.gitlab.com/user/application_security/sast/), [Software Composition Analysis (SCA)](https://docs.gitlab.com/user/application_security/dependency_scanning/), and [Secret Detection](https://docs.gitlab.com/user/application_security/secret_detection/) become critical controls to mitigate the risk of secret leaks, supply chain attacks, and weaknesses like SQL injections. With platforms like GitLab, [application security](https://about.gitlab.com/solutions/security-compliance/) is natively built into the developer's workflow, making it a natural part of the development lifecycle. Scanners can also trace through the entire program to make sure new AI-generated code is secure *in the context of all the other code* — that can be hard to spot if you’re just looking at some new code in your IDE or in an AI-generated patch.\n\nBut it’s not just about scanning, it’s about keeping pace. If development teams are going to match the speed of AI-assisted development, they need scans that are fast, accurate, and built to scale. Accuracy especially matters. 
If scanners overwhelm developers with false positives, there’s a risk of losing trust in the system altogether. \n\nThe only way to move fast *and* stay secure is to make scanning non-negotiable. \n\nEvery commit. Every branch. No exceptions.\n\n## Secure your AI-generated code with GitLab\n\nAI is changing the way we build software, but the fundamentals of secure software development still apply. Code still needs to be reviewed. Threats still need to be tested. And security still needs to be embedded in the way we work. At GitLab, that’s exactly what we’ve done. \n\nAs a developer platform, we’re not bolting security onto the workflow — we’re embedding it directly where developers already work: in the IDE, in merge requests, and in the pipeline. Scans run automatically and relevant security context is surfaced to facilitate faster remediation cycles. And, because it’s part of the same platform where developers build, test, and deploy software, there are fewer tools to juggle, less context switching, and a much smoother path to secure code.\n\nAI features like [Duo Vulnerability Explanation and Vulnerability Resolution](https://about.gitlab.com/the-source/ai/understand-and-resolve-vulnerabilities-with-ai-powered-gitlab-duo/) add another layer of speed and insight, helping developers understand risks and fix them faster, without breaking their flow.\n\nAI isn’t a shortcut to security. But with the right practices — and a platform that meets developers where they are — it can absolutely be part of building software that’s fast, secure, and scalable. \n\n> Start your [free trial of GitLab Ultimate with Duo Enterprise](https://about.gitlab.com/free-trial/) and experience what it’s like to build secure software, faster. 
With native security scanning, AI-powered insights, and a seamless developer experience, GitLab helps you shift security left without slowing down.","2025-07-10",[9,691],"security",{"featured":91,"template":693,"slug":694},"BlogPost","3-best-practices-for-building-software-in-the-era-of-llms","content:en-us:blog:3-best-practices-for-building-software-in-the-era-of-llms.yml","3 Best Practices For Building Software In The Era Of Llms","en-us/blog/3-best-practices-for-building-software-in-the-era-of-llms.yml","en-us/blog/3-best-practices-for-building-software-in-the-era-of-llms",{"_path":700,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":701,"content":709,"config":718,"_id":720,"_type":14,"title":721,"_source":16,"_file":722,"_stem":723,"_extension":19},"/en-us/blog/5-things-to-know-from-our-linkedin-live-security-deep-dive",{"title":702,"description":703,"ogTitle":702,"ogDescription":703,"noIndex":6,"ogImage":704,"ogUrl":705,"ogSiteName":706,"ogType":707,"canonicalUrls":705,"schema":708},"5 things to know from our LinkedIn Live Security Deep Dive","Security experts and product leaders offered their take on new developments in application security and the latest from GitLab 17.5.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659856/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25.png","https://about.gitlab.com/blog/5-things-to-know-from-our-linkedin-live-security-deep-dive","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 things to know from our LinkedIn Live Security Deep Dive\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2024-10-28\",\n      }",{"title":702,"description":703,"authors":710,"heroImage":704,"date":712,"body":713,"category":691,"tags":714},[711],"Fatima Sarah Khalid","2024-10-28","[GitLab's October LinkedIn Live 
broadcast](https://www.linkedin.com/feed/update/urn:li:activity:7255246777077936128) brought together security experts and product leaders to discuss the latest developments in application security and highlight key features from the GitLab 17.5 release. In case you missed it, here's what you need to know.\n\n## 1. Software is moving faster and security is struggling to keep up\nDevelopment teams are shipping at record speeds, but their security counterparts are finding it difficult to meet that pace. Our [DevSecOps survey](https://about.gitlab.com/developer-survey/) revealed that 66% of companies are shipping code twice as fast as last year, while 55% of security teams are finding vulnerabilities after code is merged to test environments. With 80% of top data breaches coming from application layer attacks, this gap must be addressed.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1023367700?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Market Insights\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## 2. Advanced SAST is getting smarter\nGitLab's new [Advanced SAST](https://about.gitlab.com/blog/gitlab-advanced-sast-is-now-generally-available/) capabilities are a game-changer for security testing. Built on technology acquired from Oxeye, Advanced SAST offers cross-file and cross-function scanning with taint analysis. 
The star feature is a code flow view that lets developers trace vulnerabilities from source to sink, making it easier to understand and fix security issues.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1023369304?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Advanced SAST\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n> Learn even more with our [Advanced SAST tutorial](https://about.gitlab.com/blog/quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai/).\n\n## 3. Accidental secret commits are a thing of the past\nGitLab's new [secret push protection feature](https://about.gitlab.com/blog/prevent-secret-leaks-in-source-code-with-gitlab-secret-push-protection/) stops sensitive information from reaching your GitLab repository by checking the contents of each commit. Instead of dealing with the aftermath of exposed credentials, the system catches secrets before they're committed, saving security teams countless hours of remediation work.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1023370222?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Secret Push\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## 4. AI is a security catalyst\nAI isn't just for code completion anymore. GitLab Duo has evolved to understand merge requests and provide contextual security assistance. 
With the new Quick Chat feature (accessible via Alt+C), developers can get security insights without leaving their editor.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1023385333?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"AI Security\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## 5. Static reachability reduces security noise\nThe new static reachability feature for Python and Java helps teams focus on vulnerabilities that matter. By identifying which dependencies are actually used in your code, it reduces false positives and helps teams prioritize real security threats.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1023388137?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Static Reachability\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Watch on-demand now\n\n[Watch the full \"Security Deep Dive\" recording](https://www.linkedin.com/feed/update/urn:li:activity:7255246777077936128) to see these features in action and hear more insights from our security experts.\n\nBe sure to follow GitLab on LinkedIn to be notified of our monthly broadcasts and get more insights and the latest news about AI-powered DevSecOps.",[9,691,715,716,717],"webcast","DevSecOps","features",{"slug":719,"featured":91,"template":693},"5-things-to-know-from-our-linkedin-live-security-deep-dive","content:en-us:blog:5-things-to-know-from-our-linkedin-live-security-deep-dive.yml","5 Things To Know From Our 
Linkedin Live Security Deep Dive","en-us/blog/5-things-to-know-from-our-linkedin-live-security-deep-dive.yml","en-us/blog/5-things-to-know-from-our-linkedin-live-security-deep-dive",{"_path":725,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":726,"content":731,"config":739,"_id":741,"_type":14,"title":742,"_source":16,"_file":743,"_stem":744,"_extension":19},"/en-us/blog/5-videos-and-interactive-tours-to-learn-gitlab-duo-fast",{"title":727,"description":728,"ogTitle":727,"ogDescription":728,"noIndex":6,"ogImage":704,"ogUrl":729,"ogSiteName":706,"ogType":707,"canonicalUrls":729,"schema":730},"5 videos and interactive tours to learn GitLab Duo fast","Get to know GitLab Duo's capabilities and benefits, and use these visual learning tools to understand how to incorporate AI throughout your software development lifecycle.\n","https://about.gitlab.com/blog/5-videos-and-interactive-tours-to-learn-gitlab-duo-fast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 videos and interactive tours to learn GitLab Duo fast\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2024-08-28\",\n      }",{"title":727,"description":728,"authors":732,"heroImage":704,"date":734,"body":735,"category":10,"tags":736},[733],"Cesar Saavedra","2024-08-28","GitLab Duo is a suite of AI-powered features designed to assist DevSecOps teams throughout the software development lifecycle. Integrated seamlessly into the GitLab platform, GitLab Duo leverages artificial intelligence to enhance productivity, improve code quality, and streamline various development and security processes. 
This article introduces you to GitLab Duo's capabilities and benefits, and lists five videos and interactive tours to help you learn how to incorporate this AI powerhouse into your own workflow.\n\nIn this article:\n- [GitLab Duo features](#gitlab-duo-features)\n- [Benefits of GitLab Duo](#benefits-of-gitlab-duo)\n- [5 videos and interactive tours](#5-videos-and-interactive-tours-to-learn-gitlab-duo)\n\n## GitLab Duo features\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/) offers a wide range of AI-powered capabilities to help you ship more secure software faster and deliver better results for your customers.\n\n### Feature development\n\n- **Code Suggestions:** Helps developers write code more efficiently by generating code and showing suggestions as they type.\n\n- **Chat:** A conversational interface that answers questions and assists with various tasks throughout the development process.\n\n- **Code Explanation:** Helps understand selected code by providing clear explanations.\n\n- **GitLab Duo for the CLI:** Helps discover or recall Git commands when and where you need them.\n\n-  **Merge Commit Message Generation:** Helps merge more quickly by generating meaningful commit messages.\n\n- **Test Generation:** Helps catch bugs early by automatically generating tests for selected code.\n\n### Securing applications\n\n- **Vulnerability Explanation:** Shows information about security vulnerabilities in code and explains how to fix them.\n\n- **Vulnerability Resolution:** Helps resolve a vulnerability by generating a merge request that addresses it. (Beta)\n\n### Facilitating collaboration\n\n- **AI Impact Dashboard:** Measures the effectiveness and impact of AI on software development lifecycle metrics.\n\n- **Code Review Summary:** Makes merge request handover to reviewers easier by summarizing all the comments in a merge request review. 
(Experimental)\n\n- **Discussion Summary:** Helps everyone get up to speed by summarizing lengthy conversations in an issue. (Beta)\n\n- **Issue Description Generation:** Helps populate an issue quickly by generating a more in-depth description based on a short summary. (Experimental)\n\n- **Merge Request Summary:** Helps populate a merge request more quickly by generating a description based on the code changes. (Beta)\n\n- **Product Analytics:** Processes and responds to questions about your application's usage data.\n\n### Advanced troubleshooting\n\n- **Root Cause Analysis:** Helps determine the cause of CI/CD job failures by analyzing logs.\n\nThese components work together to provide comprehensive AI-assisted support throughout the software development lifecycle.\n\n## Benefits of GitLab Duo\n\nGitLab Duo offers numerous benefits to development teams and organizations. By integrating AI-powered assistance throughout the development lifecycle, it helps increase productivity, improve code quality, and enhance security. \nDevelopers can write code faster, understand complex codebases more easily, and catch potential issues earlier in the development process.\n\nGitLab Duo also helps streamline collaboration, speed up code reviews, and provide valuable insights into the impact of AI on ROI metrics. These benefits contribute to faster delivery of high-quality, secure software.\n\n## 5 videos and interactive tours to learn GitLab Duo\n\nTo help you get acquainted with GitLab Duo and its capabilities quickly, we've compiled a list of five videos and interactive tours. These visual learning tools provide an in-depth look at an array of GitLab Duo features and demonstrate how they can be integrated into your development workflow.\n\n__1. 
GitLab Duo Overview__\n\nThis comprehensive video introduces the core concepts of GitLab Duo and showcases its integration within the GitLab platform.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/o2xmLTV1y0I?si=90yPCHS_x2zSBAqe\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n__2. Code Suggestions in Action__\n\nAn interactive tour demonstrating how GitLab Duo Code Suggestions works in real-time, helping developers write code more efficiently.\n\n\u003Ca href=\"https://gitlab.navattic.com/code-suggestions\">\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175911/Blog/b5gdnls7jdyrpeyjby5j.png\" alt=\"GitLab Duo Code Suggestions cover image\">\u003C/a>\n\n__3. Vulnerability Resolution Walkthrough__\n\nThis video guide takes you through the process of using GitLab Duo to understand and resolve security vulnerabilities in your code.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/VJmsw_C125E?si=cUmRiQNJbrv5Yd9D\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n__4. Chat Demo__\n\nAn interactive session showing how developers can leverage GitLab Duo Chat to get answers, generate code, and solve problems throughout the development process.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"\nhttps://www.youtube.com/embed/RJezT5_V6dI?si=QomHCGUKstnAwplM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n__5. 
AI Impact Dashboard Tutorial__\n\nA detailed look at how to use and interpret the AI Impact Dashboard to measure the effectiveness of GitLab Duo in your development processes.\n\n\u003Ca href=\"https://gitlab.navattic.com/ai-impact\">\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175921/Blog/hn7gflmqswrjb33unuja.png\" alt=\"GitLab Duo AI Impact Dashboard cover image\">\u003C/a>\u003C/p>\n\n## Get started with GitLab Duo today\n\nThese videos and interactive tours offer practical insights into how GitLab Duo can enhance your development workflow. By exploring these resources, you'll gain a better understanding of how to leverage AI-powered assistance to improve productivity, code quality, and security in your projects.\n\n> #### Become a GitLab Duo expert: [Start your free trial today!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro)\n\n## Read more\n- [10 best practices for using AI-powered GitLab Duo Chat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/)\n- [Refactor code into modern languages with AI-powered GitLab Duo](https://about.gitlab.com/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo/)\n- [Developing GitLab Duo blog series](https://about.gitlab.com/blog/developing-gitlab-duo-series/)\n- [Mastering GitLab admin tasks with GitLab Duo Chat](https://about.gitlab.com/blog/mastering-gitlab-admin-tasks-with-gitlab-duo-chat/)",[9,717,737,738],"tutorial","product",{"slug":740,"featured":6,"template":693},"5-videos-and-interactive-tours-to-learn-gitlab-duo-fast","content:en-us:blog:5-videos-and-interactive-tours-to-learn-gitlab-duo-fast.yml","5 Videos And Interactive Tours To Learn Gitlab Duo 
Fast","en-us/blog/5-videos-and-interactive-tours-to-learn-gitlab-duo-fast.yml","en-us/blog/5-videos-and-interactive-tours-to-learn-gitlab-duo-fast",{"_path":746,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":747,"content":753,"config":760,"_id":762,"_type":14,"title":763,"_source":16,"_file":764,"_stem":765,"_extension":19},"/en-us/blog/5-ways-gitlab-duo-chat-ai-can-supercharge-product-management",{"title":748,"description":749,"ogTitle":748,"ogDescription":749,"noIndex":6,"ogImage":750,"ogUrl":751,"ogSiteName":706,"ogType":707,"canonicalUrls":751,"schema":752},"5 ways GitLab Duo Chat AI can supercharge product management","Discover how to transform all aspects of product management, boosting efficiency and improving decision-making. Learn practical tips for leveraging AI throughout your PM workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666405/Blog/Hero%20Images/GitLab_Duo_Blog_Hero_1800x945_r2_B__1_.png","https://about.gitlab.com/blog/5-ways-gitlab-duo-chat-ai-can-supercharge-product-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 ways GitLab Duo Chat AI can supercharge product management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2024-09-25\",\n      }",{"title":748,"description":749,"authors":754,"heroImage":750,"date":756,"body":757,"category":10,"tags":758},[755],"Tim Rizzi","2024-09-25","As a product manager at GitLab, I constantly seek ways to enhance my productivity and decision-making. Recently, I discovered an unexpected ally in [GitLab Duo Chat](https://about.gitlab.com/gitlab-duo/). Let me share how this AI-powered assistant has transformed my approach to product management.\n\n## The daily PM challenges\n\nLike many PMs, I juggle many daily tasks — from summarizing issues and merge requests to crafting detailed product specs and investment cases. 
The sheer volume of writing and analysis required was overwhelming, and I worried about potential cognitive biases influencing my work.\n\nTo address these challenges, I needed to:\n\n1. Increase my efficiency in handling documentation tasks.\n2. Enhance the quality and objectivity of my product decisions.\n3. Improve my communication with various stakeholders.\n\n## Leveraging GitLab Duo Chat\n\nI decided to experiment with GitLab Duo Chat as a support tool for my daily PM tasks. Here's how I incorporated it into my workflow, with real examples:\n\n### 1. Issue creation and refinement\n\nI was tasked with creating an issue for a new feature that would enhance the Conan repository by adding [revision support](https://gitlab.com/gitlab-org/gitlab/-/issues/479437). To start, I prompted Chat: \"Can you create an issue to add support for downloading Conan revisions to the GitLab package registry? Think about the product's value from a C++ developer and a platform engineer perspective.\"\n\nGitLab Duo provided a comprehensive draft issue, including:\n\n* a clear description of the feature\n* value propositions for both C++ developers and platform engineers\n* implementation details\n* acceptance criteria\n* related links and labels\n\nThis gave me a solid starting point with all the necessary sections, which I refined and customized. Instead of spending an hour writing that issue, I spent more time thinking about how this feature fits within the broader GitLab strategy.\n\n### 2. Summarizing and reviewing\n\nI often ask GitLab Duo to summarize lengthy merge requests or complicated epics. 
For instance, when reviewing the epic for [protected container images](https://gitlab.com/groups/gitlab-org/-/epics/9825), I asked GitLab Duo to summarize the key changes and their value for GitLab customers and users from a PM perspective.\n\nGitLab Duo provided a detailed summary, highlighting the following:\n\n* enhanced security and compliance features\n* improved governance and control\n* streamlined operations\n* increased confidence in CI/CD pipelines\n* better alignment with DevOps practices\n* customizable security posture\n* improved audibility\n* cost-effective security solution\n\nThis summary helped me quickly grasp and share the key points with my team more effectively.\n\n### 3. Project status updates\n\nTo get a quick overview of a project's status, I asked GitLab Duo to provide an update \"like a hyper-focused project manager.\" The response included:\n\n* overall progress\n* completed items\n* in-progress tasks\n* next steps\n* timeline\n* risks and issues\n* stakeholder input\n* action items\n* key performance indicators\n* communication plans\n\nThis structured overview allowed me to quickly assess the project's status and identify areas needing attention.\n\n### 4. Value proposition and metric analysis\n\nWhen I needed to articulate the value of measuring monthly active users and storage costs for the Package stage, I asked GitLab Duo for help. The response provided a comprehensive explanation, covering:\n\n* user engagement and adoption insights\n* feature prioritization\n* capacity planning\n* business model optimization\n* customer success indicators\n* cost management\n* competitive positioning\n* product health assessment\n* ROI calculation\n* future planning considerations\n\nThis well-structured response gave me more than enough content and helped me better articulate the value of these essential metrics.\n\n### 5. 
Challenging cognitive biases\n\nTo reveal blind spots in my thinking, I often ask GitLab Duo to answer in specific personas, such as:\n\n* a hyper-focused project manager\n* a frustrated customer\n* a developer who doesn't have time to read issues\n* a product leader who demands excellence\n\nFor example, when I created an investment case for GitLab Package, I asked GitLab Duo to review it as a hypercritical CEO. This perspective helped me consider including financial projections and competitive analysis in my proposal, which I had initially overlooked.\n\n## A more efficient and effective PM\n\nThe impact of integrating GitLab Duo Chat into my workflow has been significant:\n\n1. **Increased productivity:** Tasks that used to take hours now often take minutes. Creating the initial draft of the Conan issue took about five minutes with GitLab Duo, compared to the usual 30-45 minutes I'd spend starting from scratch.\n2. **Enhanced quality:** The initial drafts produced with GitLab Duo's help are more comprehensive and structured. For the protected container images project, GitLab Duo's input helped me more effectively summarize the value of my go-to-market strategy and the project's current status.\n3. **Improved decision-making:** I've created more robust, well-rounded proposals using GitLab Duo to challenge my assumptions. The critique of my investment case led to a more thorough cost-benefit analysis.\n4. **Continuous improvement:** The feedback loop of writing, getting GitLab Duo's input, and refining has helped me improve my writing and analytical skills. My first drafts are becoming stronger, even without GitLab Duo's assistance.\n\n## A new era of AI-assisted product management\n\nWhile GitLab Duo Chat hasn't replaced my role as a PM, it has become an invaluable tool in my arsenal. It's helped me be more efficient, thorough, and objective. 
As AI assistants like GitLab Duo continue to evolve, I'm excited about the potential for further enhancing our product management practices.\n\nHowever, it's crucial to remember that GitLab Duo is a tool, not a replacement for human insight and creativity. The best results come from combining GitLab Duo's capabilities with our expertise and understanding of our unique business context.\n\n## Try GitLab Duo\n\nI encourage fellow PMs to explore how AI assistants like GitLab Duo Chat can augment their work. Here are some steps you can take:\n\n1. **Start small:** Use GitLab Duo for simple tasks like summarizing issues or drafting initial proposals.\n2. **Experiment with personas:** Ask GitLab Duo to review your work from different perspectives to uncover blind spots.\n3. **Refine your prompts:** Learn how to craft effective prompts to get the most valuable responses from GitLab Duo.\n4. **Share your experiences:** Discuss your use of AI tools with your team and contribute to best practices.\n\nWith the right approach, these tools can help us focus more on strategic thinking and less on routine tasks, ultimately leading to better products and happier customers.\n\n> [Try GitLab Duo today!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro)\n",[9,738,759],"workflow",{"slug":761,"featured":91,"template":693},"5-ways-gitlab-duo-chat-ai-can-supercharge-product-management","content:en-us:blog:5-ways-gitlab-duo-chat-ai-can-supercharge-product-management.yml","5 Ways Gitlab Duo Chat Ai Can Supercharge Product 
Management","en-us/blog/5-ways-gitlab-duo-chat-ai-can-supercharge-product-management.yml","en-us/blog/5-ways-gitlab-duo-chat-ai-can-supercharge-product-management",{"_path":767,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":768,"content":772,"config":780,"_id":782,"_type":14,"title":783,"_source":16,"_file":784,"_stem":785,"_extension":19},"/en-us/blog/a-developer-s-guide-to-building-secure-retail-apps-with-gitlab",{"config":769,"title":770,"description":771},{"noIndex":6},"A developer's guide to building secure retail apps with GitLab","Learn how a DevSecOps platform helps retailers develop secure, compliant software for complex, high-traffic retail environments.",{"title":770,"description":771,"authors":773,"body":776,"category":691,"tags":777,"date":778,"heroImage":779},[774,775],"Itzik Gan Baruch","Rebeca Fenoy-Anthony","Retailers often find application security challenging — in large part because the **attack surface is broader than ever** due to the complexity of modern commerce. From mobile apps and AI-powered personalization to omni-channel platforms and in-store IoT, every touchpoint increases the number of systems that must be secured and monitored. A single vulnerability doesn’t just affect one component, it can cascade across payment processors, inventory systems, customer data, and ultimately, brand trust.\n\nTraditional security approaches that once worked in simpler retail environments now struggle to keep up. Security processes are often bolted on as an afterthought, slowing teams down and increasing risk. But it doesn’t have to be this way.\n\n**Modern platforms embed security throughout the development lifecycle,** making protection a seamless part of the developer workflow, not a barrier to delivery. 
This approach turns security into a strategic advantage, enabling innovation without compromising resilience.\n\nIn this article, you'll discover how an integrated DevSecOps platform helps retail teams meet rising security demands **without slowing down delivery or compromising customer experience**.\n\n## Why retail security demands a different approach\n\nIn retail, security is about more than protecting data — it’s about protecting the customer experience that drives revenue. Any slowdown, outage, or vulnerability can lead to lost sales and broken trust. Retail platforms must stay online, meet compliance standards, and defend against nonstop attacks from the open internet. Unlike enterprise systems, they’re fully public-facing, with a much broader attack surface. Add in third-party integrations, APIs, and legacy systems, and it’s clear: traditional security approaches aren’t enough.\n\nAdding to the complexity, retailers face a unique set of challenges that further increase their security risks, including:\n\n### Supply chain fragility and API sprawl\n\nShipping delays, global instability, and interconnected systems disrupt logistics. Nearly half of retailers report product availability issues, and 25% lack real-time inventory visibility, according to a [2024 Fluent Commerce survey](https://premierconstructionnews.com/2024/05/25/retails-revival-fluent-commerce-study-finds-93-of-uk-retailers-expect-business-growth-over-next-year-despite-economic-challenges-and-supply-chain-disruption/). While AI-powered forecasting helps, insecure APIs and fragile integrations across the digital supply chain create attack vectors.\n\n### Legacy systems meet modern demands\n\nMany retailers operate on monolithic, outdated systems that struggle to support mobile apps, IoT devices, and real-time analytics securely. 
Without secure, agile foundations, each new digital touchpoint becomes a potential vulnerability.\n\n### AI and compliance complexity\n\nAI reshapes retail experiences through personalized recommendations and advanced customer tracking technologies like beacon sensors, facial recognition, and mobile app location services that monitor movement and behavior within physical stores. These AI-powered systems enhance both customer experiences and demand forecasting capabilities for retailers. However, [GDPR](https://gdpr.eu/what-is-gdpr/) (the European Union's General Data Protection Regulation) and similar global privacy laws require secure data handling and transparent AI logic. Security missteps can result in significant fines and lasting reputational damage.\n\n### Customer-facing automation risks\n\nSelf-checkouts, kiosks, and chatbots promise convenience and cost savings but often lack security hardening. These touchpoints become entry points for cyber attackers and enable traditional theft through weak fraud detection, limited monitoring, and easily manipulated systems that make shoplifting harder to detect.\n\n### Disparate threat surfaces\n\nRetailers are in a unique position where they must secure across multiple vectors, often maintained by globally distributed teams (depending on the size of the organization). 
E-commerce platforms, mobile applications, point-of-sale (POS) systems, and in-store IoT devices all provide an entry point for threat actors with unique characteristics requiring different security solutions to ensure resiliency.\n\nThis creates a unique paradox: Retailers must innovate faster than ever while maintaining higher security standards than most industries, all while delivering seamless customer experiences across every channel.\n\n## Why traditional AppSec falls short in retail\n\nMost retailers rely on disconnected security tools such as static application security testing (SAST) scanners, license checkers, and vulnerability assessments that work in isolation. This fragmented approach creates critical gaps:\n\n* **Limited lifecycle coverage:** Tools focus on narrow development phases, missing supply chain and runtime risks.\n \n* **Integration challenges:** Legacy system gaps and poor tool connectivity create security blind spots between teams and solutions.\n  \n* **Manual processes:** Security handoffs create bottlenecks, and issues are often discovered late, when they’re more costly to fix.\n  \n* **Team silos:** Security remains isolated from daily development workflows and separate from compliance and IT teams.\n\n### The path forward\n\nIn today’s fast-paced retail landscape, security can’t slow down innovation. 
Embedding it directly into the development lifecycle and bringing every team together on a single unified DevSecOps platform makes security a strategic advantage rather than a bottleneck.\n\n### A DevSecOps platform enables secure innovation at scale\n\nGitLab provides the most comprehensive set of security scanners to maximize application coverage, including:\n\n* [SAST](https://docs.gitlab.com/user/application_security/sast/)  \n* [DAST](https://docs.gitlab.com/user/application_security/dast/)  \n* [Dependency scanning](https://docs.gitlab.com/user/application_security/dependency_scanning/dependency_scanning_sbom/) \n* [Container scanning](https://docs.gitlab.com/user/application_security/container_scanning/)  \n* [Secret detection](https://docs.gitlab.com/user/application_security/secret_detection/)  \n* [Infrastructure-as-code scanning](https://docs.gitlab.com/user/application_security/iac_scanning/)  \n* [Fuzz testing](https://docs.gitlab.com/user/application_security/api_fuzzing/)\n\nBut security isn’t just about scanning. It's about [enforcing the right policies](https://docs.gitlab.com/user/compliance/compliance_frameworks/) to ensure vulnerabilities are identified and remediated consistently. 
With GitLab, security teams get full control to ensure the right scan is run on the right application, at the right time, and that the findings are addressed before they reach production.\n\n![Security scans in pipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1756988740/x2dteagn1z8tjfahmobv.png)\n\n\u003Ccenter>\u003Ci>Security scans run in the CI/CD pipeline, ensuring immediate feedback on potential vulnerabilities.\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\n![Vulnerability Report shows all vulnerabilities for a specific project or group.](https://res.cloudinary.com/about-gitlab-com/image/upload/v1756988740/npsgvu5e0sd2kpoxug7f.png)\n\n\u003Ccenter>\u003Ci>Vulnerability Report shows all vulnerabilities for a specific project or group.\u003C/i>\u003C/center>\n\n### One platform for Dev, Sec, and Ops\n\nRetail teams waste countless hours switching between tools, manually transferring data, losing information between systems due to fragile integrations, and reconciling conflicting reports. A unified platform eliminates this friction:\n\n* **Single source of truth** for source code, pipelines, vulnerabilities, and compliance  \n* **No integration overhead** or tool compatibility issues  \n* **Consistent workflows** across all teams and projects\n\nThe result? 
Teams spend time solving problems instead of managing tools.\n\n![Compliance center where you can enforce compliance frameworks for your projects.](https://res.cloudinary.com/about-gitlab-com/image/upload/v1756988739/d2nzltd1a2gypywzhv5f.png)\n\n\u003Ccenter>\u003Ci>The compliance center is where you can enforce compliance frameworks for your projects.\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\n\n![In the merge request, developers require approval if risks are detected before merging code, according to defined policies.](https://res.cloudinary.com/about-gitlab-com/image/upload/v1756988740/szoocztazaup2avkaxhu.png)\n\n\u003Ccenter>\u003Ci>In the merge request, developers require approval if risks are detected before merging code, according to defined policies.\u003C/i>\u003C/center>\n\n### Shared security responsibility, not silos\n\nThe most successful retail security programs make security everyone's responsibility, not just the security team's burden.\n\n**Developer empowerment**\n\nSecurity and compliance guidance appears directly in merge requests, making it impossible to miss critical issues. Developers get immediate feedback on each commit, with clear explanations of risks and remediation steps. For example,  AI-powered vulnerability explanation and vulnerability resolution help developers understand and fix security issues independently, reducing bottlenecks and building security expertise across the team.\n\n![Vulnerability page with a button for explaining or resolving issues with AI. Helps to bridge the knowledge gap with AI.](https://res.cloudinary.com/about-gitlab-com/image/upload/v1756988741/uenrjye3arfg9wjtwss1.png)\n\n\u003Ccenter>\u003Ci>Vulnerability page with a button for explaining or resolving issues with AI. 
Helps to bridge the knowledge gap with AI.\u003C/i>\u003C/center>\n\n\u003Cp>\u003C/p>\n\n**Automated compliance**\n\nGenerate audit reports, track license usage, and maintain a software bill of materials (SBOM) without manual effort.\n\n![GitLab's automated dependency report provides a comprehensive SBOM, displaying all project dependencies with their vulnerability status, license details, and security findings for complete transparency and compliance.](https://res.cloudinary.com/about-gitlab-com/image/upload/v1756988739/gpakhdvkegloqxhaeje8.png)\n\n\u003Ccenter>\u003Ci>GitLab's automated dependency report provides a comprehensive SBOM, displaying all project dependencies with their vulnerability status, license details, and security findings for complete transparency and compliance.\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\nThis approach transforms security from a gate that slows delivery into a foundation that enables confident, rapid innovation.\n\n## Platform vs. point tools: What retailers need to know\n\n| Capability | Point Tools | GitLab DevSecOps Platform |\n|------------|-------------|---------------------------|\n| SAST/DAST/API/Fuzz | Separate & limited | Fully integrated |\n| License & dependency scanning | Often external tools | Built-in |\n| Compliance & audit reporting | Manual or disconnected | Automated with traceability |\n| Collaboration across teams | Fragmented | Unified environment |\n| End-to-end visibility | Tool-specific | Full lifecycle + value stream view |\n\n## The bottom line: Security excellence drives retail success\n\nIn retail, security isn't just about protecting data, it's about protecting the customer experience that drives revenue. When security slows down releases or creates vulnerabilities, it directly impacts sales. 
Your customers expect secure, seamless experiences every time.\n\nGitLab's integrated DevSecOps platform helps retailers:\n\n* **Deploy faster without compromising security** with automated scans that catch issues before customers do.  \n* **Meet compliance requirements effortlessly** through built-in reporting for GDPR, PCI-DSS, and industry standards.  \n* **Significantly reduce security tool costs** by replacing multiple point solutions with one platform.  \n* **Turn developers into security advocates** with guidance and automation, not roadblocks.\n\nTake a tour of some of GitLab's security capabilities:\n\n* [Resolving vulnerabilities with Duo](https://gitlab.navattic.com/ve-vr-short)   \n* [Adding scans to pipeline](https://gitlab.navattic.com/gitlab-scans)  \n* [Compliance frameworks](https://gitlab.navattic.com/compliance-short)  \n* [Advanced SAST](https://gitlab.navattic.com/advanced-sast-short)\n\n> Ready to get started? Discover how GitLab Ultimate with Duo Enterprise can streamline your retail security strategy with [a free trial](https://about.gitlab.com/free-trial/).",[691,738,717,9],"2025-09-04","https://res.cloudinary.com/about-gitlab-com/image/upload/v1756989645/fojzxakmfdea6jfqjkrl.png",{"featured":6,"template":693,"slug":781},"a-developer-s-guide-to-building-secure-retail-apps-with-gitlab","content:en-us:blog:a-developer-s-guide-to-building-secure-retail-apps-with-gitlab.yml","A Developer S Guide To Building Secure Retail Apps With 
Gitlab","en-us/blog/a-developer-s-guide-to-building-secure-retail-apps-with-gitlab.yml","en-us/blog/a-developer-s-guide-to-building-secure-retail-apps-with-gitlab",{"_path":787,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":788,"content":794,"config":800,"_id":802,"_type":14,"title":803,"_source":16,"_file":804,"_stem":805,"_extension":19},"/en-us/blog/a-developers-guide-to-building-an-ai-security-governance-framework",{"title":789,"description":790,"ogTitle":789,"ogDescription":790,"noIndex":6,"ogImage":791,"ogUrl":792,"ogSiteName":706,"ogType":707,"canonicalUrls":792,"schema":793},"A developer's guide to building an AI security governance framework","Learn the strategies and practices to adopt for secure and responsible development and use of AI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664638/Blog/Hero%20Images/applicationsecurity.png","https://about.gitlab.com/blog/a-developers-guide-to-building-an-ai-security-governance-framework","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A developer's guide to building an AI security governance framework\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ayoub Fandi\"}],\n        \"datePublished\": \"2024-04-23\",\n      }",{"title":789,"description":790,"authors":795,"heroImage":791,"date":797,"body":798,"category":10,"tags":799},[796],"Ayoub Fandi","2024-04-23","Artificial Intelligence (AI) has firmly established itself as a pillar of digital transformation, disrupting industries, increasing efficiency, and providing unmatched access to large data sets. AI also raises profound questions regarding security governance. How do I ensure I can leverage the best of what AI has to offer while mitigating its potential security risks? As [AI continues to advance](https://about.gitlab.com/topics/devops/the-role-of-ai-in-devops/), there is a growing need for strong oversight and accountability. 
This article delves into the complex landscape of AI security governance, exploring various frameworks, strategies, and practices that organizations like GitLab are adopting to ensure the responsible development of AI technologies and features.\n\n## Greater scrutiny on AI\n\n### AI: Single term, numerous realities\nAI isn't a monolithic entity - it encompasses a spectrum of technologies and applications. From machine learning algorithms that power recommendation systems to advanced natural language processing models like Anthropic’s Claude 3, each AI system brings its unique set of opportunities and challenges.\n\nAccording to [a 2023 MITRE report](https://www.mitre.org/sites/default/files/2023-06/PR-23-1943-A-Sensible-Regulatory-Framework-For-AI-Security_0.pdf), three main areas of AI currently exist:\n\n1. **AI as a subsystem**\n\n\u003Cp>\u003C/p>\u003Ci>\"AI is embedded in many software systems. Discrete AI models routinely perform machine perception and optimization functions, from face recognition in photos uploaded to the cloud, to dynamically allocating and optimizing network resources in 5G wireless networks.\n  \u003Cp>\u003C/p>\n\"There are a wide range of vulnerabilities and threats against these types of AI subsystems – from data poisoning attacks to adversarial input attacks – that can be used to manipulate subsystems.\"\u003C/i>\u003Cp>\u003C/p>\n\n2. **AI as human augmentation**\n\u003Cp>\u003C/p>\u003Ci>\"Another application of AI is in augmenting human performance, allowing a person to operate with much larger scope and scale. This has wide-ranging implications for workforce planning as AI has the potential to increase productivity and shift the composition of labor markets, similar to the role of automation in the manufacturing industry. 
\n  \u003Cp>\u003C/p>\n\"While sophisticated hackers and military information operations can already generate believable content today using techniques such as computer-generated imagery, LLMs will make that capability available to anyone, while increasing the scope and scale at which the professionals can operate.\"\u003C/i>\u003Cp>\u003C/p>\n\n3. **AI with agency**\n\u003Cp>\u003C/p>\u003Ci>\"A segment of the tech community is increasingly concerned about scenarios where sophisticated AI could operate as an independent, goal-seeking agent. While science fiction historically embodied this AI in anthropomorphic robots, the AI we have today is principally confined to digital and virtual domains.\n\u003Cp>\u003C/p>\n\"One scenario is an AI model given a specific adversarial agenda. Stuxnet is perhaps an early example of sophisticated, AI-fueled, goal-seeking malware with an arsenal of zero-day attacks that ended up escaping onto the internet.\"\u003C/i>\u003Cp>\u003C/p>\n\nYou can focus your efforts in terms of security governance based on which areas your company is looking to adopt and the expected business benefits.\u003Cp>\u003C/p>\n\n### Frameworks for AI security governance\nFor effective AI security governance, we must navigate the complex landscape of guidelines and principles developed by various organizations.\n\nGovernments, international organizations, and tech companies have all played their part in shaping AI security governance frameworks. 
You can review the frameworks below and choose those that are relevant and/or apply to your organization:\n\n- [NIST AI Risk Management Framework (AI RMF)](https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.100-1.pdf)\n- [Google’s Security Artificial Intelligence Framework](https://services.google.com/fh/files/blogs/google_secure_ai_framework_approach.pdf)\n- [OWASP Top 10 for LLMs](https://owasp.org/www-project-top-10-for-large-language-model-applications/assets/PDF/OWASP-Top-10-for-LLMs-2023-v1_0.pdf)\n- [The UK’s NCSC Principles for the Security of Machine Learning](https://www.ncsc.gov.uk/files/Principles-for-the-security-of-machine-learning.pdf)\n\nWhile these frameworks provide valuable guidance, they also introduce complexity. Organizations must determine which apply to their AI usage and how they align to their practices. Moreover, the dynamic nature of AI requires continuous adaptation to stay secure.\n\nSomething to note is that if you read through these frameworks, you’ll notice that numerous controls overlap with standard security best practices. This isn’t a coincidence. A strong overall security program is a prerequisite for proper AI security governance.\n\n## How-to: AI security governance\n### The why and the what\nAI security governance starts with understanding what AI technologies your organization is using or developing, why you are using them, and where these technologies fit into your operations. It's essential to define clear objectives and identify potential security risks associated with AI deployment. This introspection lays the foundation for effective AI security governance.\n\n#### The why\n\nUnderstanding the \"why\" behind each AI application is pivotal to build effective security governance. Each AI system deployed has to serve a specific purpose. Is AI being utilized to enhance customer experiences, automate manual tasks, or support the decision-making process? 
\n\nBy uncovering the motivations driving AI initiatives, organizations can align these projects with their broader business objectives. This alignment ensures that AI investments are strategically focused, delivering value in line with organizational goals. It also aids in prioritizing AI systems that have a more significant impact on the core mission of the company.\n\n#### The what\nIn the realm of AI security governance, the foundational step is conducting a comprehensive inventory of all AI systems, algorithms, and data sources within your organization. This includes meticulously cataloging all AI technologies in use, ranging from machine learning models and natural language processing algorithms to computer vision systems. This would also involve identifying the data sources feeding these AI systems, and their origins (internal databases, customer interactions, or third-party data providers). Such an inventory provides three main benefits: \n- to gain a holistic understanding of the AI ecosystem within the organization \n- to establish a strong basis for monitoring, auditing, and managing these assets effectively\n- to focus security efforts on the high-risk/critical areas\n\n### How to develop a security risk management program\nA robust security risk management program is at the core of responsible AI security governance. The critical building blocks for this program are the what and the why we discussed earlier. \n\nSpecificities of AI make security risk management more complex. In the NIST AI RMF mentioned earlier, numerous challenges are highlighted, including:\n\n- Difficult to measure AI-related security risks\n    - Potential security risks could emerge from the AI model, the software on which you are training the model, or the data ingested by the model. 
Different stages of the AI lifecycle might also trigger specific security risks depending on which actors (producers, developers, or consumers) are leveraging the AI solution.\n- Risk tolerance threshold might be complex to determine \n    - As the potential security risks aren’t easily identifiable, determining the risk tolerance your organization can withstand regarding AI can be a very empirical exercise.\n- Not considering AI in isolation \n    - Security governance of AI systems should be part of your security risk management strategy. Different users might have different parts of the overall picture. Ensuring you have complete information and full visibility into the AI lifecycle is critical to making the best decisions.\n\nSecurity risk management should be an ongoing process, adapting to the quickly evolving AI landscape. Reassessing the program, reviewing assumptions regarding the environment and involving additional business stakeholders are activities that should be happening on a regular basis.\n\n## AI security governance and the GitLab DevSecOps platform\n### Using AI to power DevSecOps \nLet’s take [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI capabilities to help power DevSecOps workflows, as an example. [GitLab Duo Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) helps developers write code more efficiently by using generative AI to assist in software engineering tasks. It works either through code completion or through code generation using natural language code comment blocks.\n\nTo ensure it can be fully leveraged, security needs of potential users and customers have to be considered. As an example, data used to produce Code Suggestions is immediately discarded by the AI models. \n\nAll of GitLab’s AI providers are subject to contractual terms with GitLab that prohibit the use of customer content for the provider’s own purposes, except to perform their independent legal obligations. 
[GitLab’s own privacy policy](https://about.gitlab.com/privacy/) prevents us from using customer data to train models without customer consent. \n\nOf course, to fully benefit from Code Suggestions, you should:\n- understand and review all suggestions to see if they align with your development guidelines\n- limit providing sensitive information or proprietary code in prompts \n- ensure the suggestion follows the same secure coding guidelines your company has\n- review the code using automated scanning for vulnerable dependencies, input validation and output sanitization, as well as license checks\n\n### Securing AI\nManaging the output of AI systems is equally important as managing the input. Security scanning tools can help identify vulnerabilities and potential threats in AI-generated code. \n\nManaging AI output requires a systematic approach to code review and validation. Organizations should [integrate security scanning tools into their CI/CD pipelines](https://docs.gitlab.com/ee/user/application_security/), ensuring that AI-generated code is checked for security vulnerabilities before deployment. Automated security checks can help detect vulnerabilities early in the development process, reducing the risk of potential vulnerable code stemming from suggested code blocks being merged.\n\nFor any GitLab Duo generated code, changes are managed via merge requests which trigger your CI pipeline (including any security and code quality scanning you have configured). This ensures any governance rules you have set up for your merge requests like required approvals are enforced.\n\nAI systems are systems. Existing security controls apply to AI systems the same way they would apply to the rest of your environment. 
Common security controls around application security still apply, including [security reviews](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/data_usage.html), security scanning, [threat modeling](https://danielmiessler.com/p/athi-an-ai-threat-modeling-framework-for-policymakers), encryption, etc. The [Google Secure AI Framework](https://services.google.com/fh/files/blogs/google_secure_ai_framework_approach.pdf) highlights these six elements:\n- expand strong security foundations to the AI ecosystem\n- extend detection and response to bring AI into an organization’s threat universe\n- automate defenses to keep pace with existing and new threats\n- harmonize platform-level controls to ensure consistent security across the organization\n- adapt controls to adjust mitigations and create faster feedback loops for AI deployment\n- contextualize AI system risks in surrounding business processes\n\nIf you have a strong security program, managing AI will be an extension of your current program and account for specific risks and vulnerabilities.\n\n## How GitLab Duo is secured\nGitLab recognizes the significance of security in AI governance. Our very strong security program is focused on ensuring our customers can fully leverage [GitLab Duo](https://docs.gitlab.com/ee/user/ai_features.html) in a secure manner. 
This is how the security departments are collaborating to secure GitLab’s AI features:\n- **Security Assurance:** Seeks to address our compliance requirements regarding security, that AI security risks are identified and properly managed, and that our customers understand how we secure our application, infrastructure, and services.\n\n- **Security Operations:** Monitors our infrastructure and quickly responds to threats using a team of skilled engineers as well as automation capabilities, helping to ensure AI features aren’t abused or used in a malevolent manner.\n\n- **Product Security:** Helps the product and engineering teams by providing security expertise for our AI features and helping to secure the underlying infrastructure on which our product is hosted.\n\n- **Corporate Security and IT Operations:** Finds potential vulnerabilities in our product to proactively mitigate and support other departments by performing research on relevant security areas.\n\nOur Security team works closely with GitLab's Legal and Corporate Affairs team to ensure our framework for AI security governance is comprehensive. The recent launch of the [GitLab AI Transparency Center](https://about.gitlab.com/blog/introducing-the-gitlab-ai-transparency-center/) showcases our commitment to implementing a strong AI governance. We published our AI ethics principles as well as our AI continuity plan to demonstrate our AI resiliency.\n\n## Learn more\nAI security governance is a complex area, especially as the field is in a nascent form. As AI continues to support our workflows and accelerate our processes, responsible AI security governance becomes a key pillar of any security program. By understanding the nuances of AI, enhancing your risk management program, and using AI features that are developed responsibly, you can ensure that AI-powered workflows follow the principles of security, privacy, and trust. 
\n\n>  Learn more about [GitLab Duo AI features](https://about.gitlab.com/gitlab-duo/).\n",[9,716,691,186],{"slug":801,"featured":91,"template":693},"a-developers-guide-to-building-an-ai-security-governance-framework","content:en-us:blog:a-developers-guide-to-building-an-ai-security-governance-framework.yml","A Developers Guide To Building An Ai Security Governance Framework","en-us/blog/a-developers-guide-to-building-an-ai-security-governance-framework.yml","en-us/blog/a-developers-guide-to-building-an-ai-security-governance-framework",{"_path":807,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":808,"content":810,"config":815,"_id":817,"_type":14,"title":818,"_source":16,"_file":819,"_stem":820,"_extension":19},"/en-us/blog/a-developers-guide-to-building-secure-retail-apps-with-gitlab",{"config":809,"title":770,"description":771},{"noIndex":6},{"title":770,"description":771,"authors":811,"body":812,"category":691,"tags":813,"date":814,"heroImage":779},[774,775],"Retailers often find application security challenging — in large part because the **attack surface is broader than ever** due to the complexity of modern commerce. From mobile apps and AI-powered personalization to omni-channel platforms and in-store IoT, every touchpoint increases the number of systems that must be secured and monitored. A single vulnerability doesn’t just affect one component, it can cascade across payment processors, inventory systems, customer data, and ultimately, brand trust.\n\nTraditional security approaches that once worked in simpler retail environments now struggle to keep up. Security processes are often bolted on as an afterthought, slowing teams down and increasing risk. But it doesn’t have to be this way.\n\n**Modern platforms embed security throughout the development lifecycle,** making protection a seamless part of the developer workflow, not a barrier to delivery. 
This approach turns security into a strategic advantage, enabling innovation without compromising resilience.\n\nIn this article, you'll discover how an integrated DevSecOps platform helps retail teams meet rising security demands **without slowing down delivery or compromising customer experience**.\n\n## Why retail security demands a different approach\n\nIn retail, security is about more than protecting data — it’s about protecting the customer experience that drives revenue. Any slowdown, outage, or vulnerability can lead to lost sales and broken trust. Retail platforms must stay online, meet compliance standards, and defend against nonstop attacks from the open internet. Unlike enterprise systems, they’re fully public-facing, with a much broader attack surface. Add in third-party integrations, APIs, and legacy systems, and it’s clear: traditional security approaches aren’t enough.\n\nAdding to the complexity, retailers face a unique set of challenges that further increase their security risks, including:\n\n### Supply chain fragility and API sprawl\n\nShipping delays, global instability, and interconnected systems disrupt logistics. Nearly half of retailers report product availability issues, and 25% lack real-time inventory visibility, according to a [2024 Fluent Commerce survey](https://premierconstructionnews.com/2024/05/25/retails-revival-fluent-commerce-study-finds-93-of-uk-retailers-expect-business-growth-over-next-year-despite-economic-challenges-and-supply-chain-disruption/). While AI-powered forecasting helps, insecure APIs and fragile integrations across the digital supply chain create attack vectors.\n\n### Legacy systems meet modern demands\n\nMany retailers operate on monolithic, outdated systems that struggle to support mobile apps, IoT devices, and real-time analytics securely. 
Without secure, agile foundations, each new digital touchpoint becomes a potential vulnerability.\n\n### AI and compliance complexity\n\nAI reshapes retail experiences through personalized recommendations and advanced customer tracking technologies like beacon sensors, facial recognition, and mobile app location services that monitor movement and behavior within physical stores. These AI-powered systems enhance both customer experiences and demand forecasting capabilities for retailers. However, [GDPR](https://gdpr.eu/what-is-gdpr/) (the European Union's General Data Protection Regulation) and similar global privacy laws require secure data handling and transparent AI logic. Security missteps can result in significant fines and lasting reputational damage.\n\n### Customer-facing automation risks\n\nSelf-checkouts, kiosks, and chatbots promise convenience and cost savings but often lack security hardening. These touchpoints become entry points for cyber attackers and enable traditional theft through weak fraud detection, limited monitoring, and easily manipulated systems that make shoplifting harder to detect.\n\n### Disparate threat surfaces\n\nRetailers are in a unique position where they must secure across multiple vectors, often maintained by globally distributed teams (depending on the size of the organization). 
E-commerce platforms, mobile applications, point-of-sale (POS) systems, and in-store IoT devices all provide an entry point for threat actors with unique characteristics requiring different security solutions to ensure resiliency.\n\nThis creates a unique paradox: Retailers must innovate faster than ever while maintaining higher security standards than most industries, all while delivering seamless customer experiences across every channel.\n\n## Why traditional AppSec falls short in retail\n\nMost retailers rely on disconnected security tools such as static application security testing (SAST) scanners, license checkers, and vulnerability assessments that work in isolation. This fragmented approach creates critical gaps:\n\n* **Limited lifecycle coverage:** Tools focus on narrow development phases, missing supply chain and runtime risks.\n \n* **Integration challenges:** Legacy system gaps and poor tool connectivity create security blind spots between teams and solutions.\n  \n* **Manual processes:** Security handoffs create bottlenecks, and issues are often discovered late, when they’re more costly to fix.\n  \n* **Team silos:** Security remains isolated from daily development workflows and separate from compliance and IT teams.\n\n### The path forward\n\nIn today’s fast-paced retail landscape, security can’t slow down innovation. 
Embedding it directly into the development lifecycle and bringing every team together on a single unified DevSecOps platform makes security a strategic advantage rather than a bottleneck.\n\n### A DevSecOps platform enables secure innovation at scale\n\nGitLab provides the most comprehensive set of security scanners to maximize application coverage, including:\n\n* [SAST](https://docs.gitlab.com/user/application_security/sast/)  \n* [DAST](https://docs.gitlab.com/user/application_security/dast/)  \n* [Dependency scanning](https://docs.gitlab.com/user/application_security/dependency_scanning/dependency_scanning_sbom/) \n* [Container scanning](https://docs.gitlab.com/user/application_security/container_scanning/)  \n* [Secret detection](https://docs.gitlab.com/user/application_security/secret_detection/)  \n* [Infrastructure-as-code scanning](https://docs.gitlab.com/user/application_security/iac_scanning/)  \n* [Fuzz testing](https://docs.gitlab.com/user/application_security/api_fuzzing/)\n\nBut security isn’t just about scanning. It's about [enforcing the right policies](https://docs.gitlab.com/user/compliance/compliance_frameworks/) to ensure vulnerabilities are identified and remediated consistently. 
With GitLab, security teams get full control to ensure the right scan is run on the right application, at the right time, and that the findings are addressed before they reach production.\n\n![Security scans in pipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1756988740/x2dteagn1z8tjfahmobv.png)\n\n\u003Ccenter>\u003Ci>Security scans run in the CI/CD pipeline, ensuring immediate feedback on potential vulnerabilities.\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\n![Vulnerability Report shows all vulnerabilities for a specific project or group.](https://res.cloudinary.com/about-gitlab-com/image/upload/v1756988740/npsgvu5e0sd2kpoxug7f.png)\n\n\u003Ccenter>\u003Ci>Vulnerability Report shows all vulnerabilities for a specific project or group.\u003C/i>\u003C/center>\n\n### One platform for Dev, Sec, and Ops\n\nRetail teams waste countless hours switching between tools, manually transferring data, losing information between systems due to fragile integrations, and reconciling conflicting reports. A unified platform eliminates this friction:\n\n* **Single source of truth** for source code, pipelines, vulnerabilities, and compliance  \n* **No integration overhead** or tool compatibility issues  \n* **Consistent workflows** across all teams and projects\n\nThe result? 
Teams spend time solving problems instead of managing tools.\n\n![Compliance center where you can enforce compliance frameworks for your projects.](https://res.cloudinary.com/about-gitlab-com/image/upload/v1756988739/d2nzltd1a2gypywzhv5f.png)\n\n\u003Ccenter>\u003Ci>The compliance center is where you can enforce compliance frameworks for your projects.\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\n\n![In the merge request, developers require approval if risks are detected before merging code, according to defined policies.](https://res.cloudinary.com/about-gitlab-com/image/upload/v1756988740/szoocztazaup2avkaxhu.png)\n\n\u003Ccenter>\u003Ci>In the merge request, developers require approval if risks are detected before merging code, according to defined policies.\u003C/i>\u003C/center>\n\n### Shared security responsibility, not silos\n\nThe most successful retail security programs make security everyone's responsibility, not just the security team's burden.\n\n**Developer empowerment**\n\nSecurity and compliance guidance appears directly in merge requests, making it impossible to miss critical issues. Developers get immediate feedback on each commit, with clear explanations of risks and remediation steps. For example,  AI-powered vulnerability explanation and vulnerability resolution help developers understand and fix security issues independently, reducing bottlenecks and building security expertise across the team.\n\n![Vulnerability page with a button for explaining or resolving issues with AI. Helps to bridge the knowledge gap with AI.](https://res.cloudinary.com/about-gitlab-com/image/upload/v1756988741/uenrjye3arfg9wjtwss1.png)\n\n\u003Ccenter>\u003Ci>Vulnerability page with a button for explaining or resolving issues with AI. 
Helps to bridge the knowledge gap with AI.\u003C/i>\u003C/center>\n\n\u003Cp>\u003C/p>\n\n**Automated compliance**\n\nGenerate audit reports, track license usage, and maintain a software bill of materials (SBOM) without manual effort.\n\n![GitLab's automated dependency report provides a comprehensive SBOM, displaying all project dependencies with their vulnerability status, license details, and security findings for complete transparency and compliance.](https://res.cloudinary.com/about-gitlab-com/image/upload/v1756988739/gpakhdvkegloqxhaeje8.png)\n\n\u003Ccenter>\u003Ci>GitLab's automated dependency report provides a comprehensive SBOM, displaying all project dependencies with their vulnerability status, license details, and security findings for complete transparency and compliance.\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\nThis approach transforms security from a gate that slows delivery into a foundation that enables confident, rapid innovation.\n\n## Platform vs. point tools: What retailers need to know\n\n| Capability | Point Tools | GitLab DevSecOps Platform |\n|------------|-------------|---------------------------|\n| SAST/DAST/API/Fuzz | Separate & limited | Fully integrated |\n| License & dependency scanning | Often external tools | Built-in |\n| Compliance & audit reporting | Manual or disconnected | Automated with traceability |\n| Collaboration across teams | Fragmented | Unified environment |\n| End-to-end visibility | Tool-specific | Full lifecycle + value stream view |\n\n## The bottom line: Security excellence drives retail success\n\nIn retail, security isn't just about protecting data, it's about protecting the customer experience that drives revenue. When security slows down releases or creates vulnerabilities, it directly impacts sales. 
Your customers expect secure, seamless experiences every time.\n\nGitLab's integrated DevSecOps platform helps retailers:\n\n* **Deploy faster without compromising security** with automated scans that catch issues before customers do.  \n* **Meet compliance requirements effortlessly** through built-in reporting for GDPR, PCI-DSS, and industry standards.  \n* **Significantly reduce security tool costs** by replacing multiple point solutions with one platform.  \n* **Turn developers into security advocates** with guidance and automation, not roadblocks.\n\nTake a tour of some of GitLab's security capabilities:\n\n* [Resolving vulnerabilities with GitLab Duo](https://gitlab.navattic.com/ve-vr-short)   \n* [Adding scans to pipeline](https://gitlab.navattic.com/gitlab-scans)  \n* [Compliance frameworks](https://gitlab.navattic.com/compliance-short)  \n* [Advanced SAST](https://gitlab.navattic.com/advanced-sast-short)\n\n> Ready to get started? Discover how GitLab Ultimate with Duo Enterprise can streamline your retail security strategy with [a free trial](https://about.gitlab.com/free-trial/).",[691,738,717,9],"2025-09-04",{"featured":91,"template":693,"slug":816},"a-developers-guide-to-building-secure-retail-apps-with-gitlab","content:en-us:blog:a-developers-guide-to-building-secure-retail-apps-with-gitlab.yml","A Developers Guide To Building Secure Retail Apps With Gitlab","en-us/blog/a-developers-guide-to-building-secure-retail-apps-with-gitlab.yml","en-us/blog/a-developers-guide-to-building-secure-retail-apps-with-gitlab",{"_path":822,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":823,"content":829,"config":836,"_id":838,"_type":14,"title":839,"_source":16,"_file":840,"_stem":841,"_extension":19},"/en-us/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q",{"title":824,"description":825,"ogTitle":824,"ogDescription":825,"noIndex":6,"ogImage":826,"ogUrl":827,"ogSiteName":706,"ogType":707,"canonicalUrls":827,"schema":828},"Accelerate code reviews with 
GitLab Duo and Amazon Q","Use AI-powered agents to optimize code reviews by automatically analyzing merge requests and providing comprehensive feedback on bugs, readability, and coding standards.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750096976/Blog/Hero%20Images/Blog/Hero%20Images/Screenshot%202024-11-27%20at%204.55.28%E2%80%AFPM_4VVz6DgGBOvbGY8BUmd068_1750096975734.png","https://about.gitlab.com/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Accelerate code reviews with GitLab Duo and Amazon Q\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-06-02\",\n      }",{"title":824,"description":825,"authors":830,"heroImage":826,"date":831,"body":832,"category":10,"tags":833},[733],"2025-06-02","Code reviews are critical for catching bugs, improving code readability, and maintaining coding standards, but they can also be a major bottleneck in your workflow. When you're trying to ship features quickly, waiting for multiple team members to review your code can be frustrating. The back-and-forth discussions, the scheduling conflicts, and the time it takes to get everyone aligned can stretch what should be a simple review into days or even weeks.\n\nHere's where [GitLab Duo with Amazon Q](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), our new offering that delivers agentic AI throughout the software development lifecycle for AWS customers, comes in to transform your review process. This intelligent, AI-powered solution can perform comprehensive code reviews for you in a fraction of the time it would take your human colleagues. By leveraging advanced agentic AI capabilities, GitLab Duo with Amazon Q streamlines your entire review workflow without sacrificing the quality and thoroughness you need. 
Think of it as having an always-available, highly skilled reviewer who can instantly analyze your code and provide actionable feedback.\n\n## How it works: Launching a code review\n\nSo how does GitLab Duo with Amazon Q actually work? Let's say you've just finished working on a feature and created a merge request with multiple code updates. Instead of pinging your teammates and waiting for their availability, you simply enter a quick command in the comment section: \"/q review\". That's it – just those two words trigger the AI to spring into action.\n\n![Triggering a code review using GitLab Duo with Amazon Q](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097002/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097002096.png)\n\nOnce you've entered the command, Amazon Q Service immediately begins analyzing your code changes. You'll see a confirmation that the review is underway, and within moments, the AI is examining every line of your updates, checking for potential issues across multiple dimensions.\nWhen the review completes, you receive comprehensive feedback that covers all the bases: bug detection, readability improvements, syntax errors, and adherence to your team's coding standards. The AI doesn't just point out problems, it provides context and suggestions for fixing them, making it easy for you to understand what needs attention and why.\n\nThe beauty of this agentic AI approach is that it handles the heavy lifting of code review while you focus on what matters most: building great software. You get the benefits of thorough code reviews — better bug detection, consistent coding standards, and improved code quality — without the time sink. 
Your deployment times shrink dramatically because you're no longer waiting in review queues, and your entire team becomes more productive.\n\n## Why use GitLab Duo with Amazon Q?\n\nGitLab Duo with Amazon Q transforms your development workflow in the following ways:\n- Lightning-fast code reviews that don't compromise on quality\n- Consistent application of coding standards across your entire codebase\n- Immediate feedback that helps you fix issues before they reach production\n- Reduced deployment times that let you ship features faster\n- More time for your team to focus on creative problem-solving instead of repetitive reviews\n\nReady to see this game-changing feature in action? Watch how GitLab Duo with Amazon Q can revolutionize your code review process:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4gFIgyFc02Q?si=GXVz--AIrWiwzf-I\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n> To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).\n> \n> And make sure to join the GitLab 18 virtual launch event to learn about our agentic AI plans and more. 
[Register today!](https://about.gitlab.com/eighteen/)",[9,496,834,738,717,284,835,737],"code review","AWS",{"slug":837,"featured":91,"template":693},"accelerate-code-reviews-with-gitlab-duo-and-amazon-q","content:en-us:blog:accelerate-code-reviews-with-gitlab-duo-and-amazon-q.yml","Accelerate Code Reviews With Gitlab Duo And Amazon Q","en-us/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q.yml","en-us/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q",{"_path":843,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":844,"content":850,"config":857,"_id":859,"_type":14,"title":860,"_source":16,"_file":861,"_stem":862,"_extension":19},"/en-us/blog/agentic-ai-guides-and-resources",{"title":845,"description":846,"ogTitle":845,"ogDescription":846,"noIndex":6,"ogImage":847,"ogUrl":848,"ogSiteName":706,"ogType":707,"canonicalUrls":848,"schema":849},"Agentic AI guides and resources","Learn everything you need to know about agentic AI, including what it is, how it works, why it levels up your DevSecOps environment, and best practices for implementation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749658912/Blog/Hero%20Images/blog-image-template-1800x945__20_.png","https://about.gitlab.com/blog/agentic-ai-guides-and-resources","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Agentic AI guides and resources\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2025-05-07\",\n      }",{"title":845,"description":846,"authors":851,"heroImage":847,"date":853,"body":854,"category":10,"tags":855,"updatedDate":856},[852],"GitLab","2025-05-07","## Defining agentic AI\n\nAgentic AI is a type of artificial intelligence that leverages advanced language models and natural language processing to take independent action. 
Unlike traditional generative AI tools that require constant human direction, these systems can understand requests, make decisions, and execute multi-step plans to achieve goals. They tackle complex tasks by breaking them into manageable steps and employ adaptive learning to modify their approach when facing challenges.\n\n[Learn more about agentic AI](https://about.gitlab.com/topics/agentic-ai/)\n\n## Agentic AI insights\n- [GitLab Duo Agent Platform Public Beta: Next-gen AI orchestration and more](https://about.gitlab.com/blog/gitlab-duo-agent-platform-public-beta/) — Introducing the DevSecOps orchestration platform designed to unlock asynchronous collaboration between developers and AI agents.\n- [GitLab Duo Agent Platform: What's next for intelligent DevSecOps](https://about.gitlab.com/blog/gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops/) — GitLab Duo Agent Platform, a DevSecOps orchestration platform for humans and AI agents, leverages agentic AI for collaboration across the software development lifecycle.\n- [From vibe coding to agentic AI: A roadmap for technical leaders](https://about.gitlab.com/the-source/ai/from-vibe-coding-to-agentic-ai-a-roadmap-for-technical-leaders/) — Discover how to implement vibe coding and agentic AI in your development process to increase productivity while maintaining code quality and security.\n- [Emerging agentic AI trends reshaping software development](https://about.gitlab.com/the-source/ai/emerging-agentic-ai-trends-reshaping-software-development/) — Discover how agentic AI transforms development from isolated coding to intelligent workflows that enhance productivity while maintaining security.\n- [Agentic AI: Unlocking developer potential at scale](https://about.gitlab.com/the-source/ai/agentic-ai-unlocking-developer-potential-at-scale/) — Explore how agentic AI is transforming software development, moving beyond code completion to create AI partners that proactively tackle complex tasks.\n- [Agentic AI, 
self-hosted models, and more: AI trends for 2025](https://about.gitlab.com/the-source/ai/ai-trends-for-2025-agentic-ai-self-hosted-models-and-more/) — Discover key trends in AI for software development, from on-premises model deployments to intelligent, adaptive AI agents.\n- [How agentic AI unlocks platform engineering potential](https://about.gitlab.com/the-source/ai/how-agentic-ai-unlocks-platform-engineering-potential/) — Explore how agentic AI elevates platform engineering by automating complex workflows and scaling standardization.\n\n## The agentic AI ecosystem\n- [AI-driven code analysis: The new frontier in code security](https://about.gitlab.com/topics/agentic-ai/ai-code-analysis/) \n- [DevOps automation & AI agents](https://about.gitlab.com/topics/agentic-ai/devops-automation-ai-agents/)\n- [AI-augmented software development: Agentic AI for DevOps](https://about.gitlab.com/topics/agentic-ai/ai-augmented-software-development/)\n\n## Best practices for implementing agentic AI\n\n- [Implementing effective guardrails for AI agents](https://about.gitlab.com/the-source/ai/implementing-effective-guardrails-for-ai-agents/) — Discover essential security guardrails for AI agents in DevSecOps, from compliance controls and infrastructure protection to user access management.\n\n## GitLab's agentic AI offerings\n\n### GitLab Duo with Amazon Q\n\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/) — The comprehensive AI-powered DevSecOps platform combined with the deepest set of cloud computing capabilities speeds dev cycles, increases automation, and improves code quality.\n- [DevSecOps + Agentic AI: Now on GitLab Self-Managed Ultimate on AWS](https://about.gitlab.com/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws/) — Start using AI-powered, DevSecOps-enhanced agents in your AWS GitLab Self-Managed Ultimate instance. 
Enjoy the benefits of GitLab Duo and Amazon Q in your organization.\n- [GitLab Duo with Amazon Q partner page](https://about.gitlab.com/partners/technology-partners/aws/)\n\nWatch GitLab Duo with Amazon Q in action:\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1075753390?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Technical Demo: GitLab Duo with Amazon Q\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n#### Guided tour\nClick on the image to start a tour of GitLab Duo with Amazon Q:\n\n[![GitLab Duo with Amazon Q interactive tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673568/Blog/Content%20Images/Screenshot_2025-05-07_at_7.24.45_AM.png)](https://gitlab.navattic.com/duo-with-q)\n\n#### GitLab Duo with Amazon Q tutorials\n- [Enhance application quality with AI-powered test generation](https://about.gitlab.com/blog/enhance-application-quality-with-ai-powered-test-generation/) — Learn how GitLab Duo with Amazon Q improves the QA process by automatically generating comprehensive unit tests.\n- [GitLab Duo + Amazon Q: Transform ideas into code in minutes](https://about.gitlab.com/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes/) — The new GitLab Duo with Amazon Q integration analyzes your issue descriptions and automatically generates complete working code solutions, accelerating development workflows.\n- [Accelerate code reviews with GitLab Duo and Amazon Q](https://about.gitlab.com/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q/) — Use AI-powered agents to optimize code reviews by automatically analyzing merge requests and providing comprehensive feedback on bugs, readability, and coding standards.\n- [Speed up 
code reviews: Let AI handle the feedback implementation](https://about.gitlab.com/blog/speed-up-code-reviews-let-ai-handle-the-feedback-implementation/) — Discover how GitLab Duo with Amazon Q automates the implementation of code review feedback through AI, transforming a time-consuming manual process into a streamlined workflow.\n\n### GitLab Duo Agent Platform\n- [GitLab Duo Chat gets agentic AI makeover](https://about.gitlab.com/blog/gitlab-duo-chat-gets-agentic-ai-makeover/) — Our new Duo Chat experience, currently an experimental release, helps developers onboard to projects, understand assignments, implement changes, and more.\nWatch GitLab Duo Agent Platform in action:\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1095679084?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Agent Platform Demo Clip\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n#### GitLab Agent Platform (and Duo Workflow) tutorials and use cases\n- [Accelerate learning with GitLab Duo Agent Platform](https://about.gitlab.com/blog/accelerate-learning-with-gitlab-duo-agent-platform/) — Learn how agentic AI helped generate comprehensive gRPC documentation in minutes, not hours.\n- [Fast and secure AI agent deployment to Google Cloud with GitLab](https://about.gitlab.com/blog/fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab/)\n\n- [Refactoring JavaScript to TypeScript with GitLab Duo Workflow](https://about.gitlab.com/blog/refactoring-javascript-to-typescript-with-gitlab-duo-workflow/)\n\n- [Automating tedious coding tasks with GitLab Duo Workflow](https://about.gitlab.com/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow/) — See how agentic AI can reduce 
time spent on repetitive tasks, freeing you up to focus on developing innovative solutions and shipping the next big thing.\n\n- [Use GitLab Duo Workflow to improve application quality assurance](https://about.gitlab.com/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance/) — Learn step-by-step how to add unit tests to a Java application using agentic AI (includes a video tutorial).\n\n- [Solving complex challenges with GitLab Duo Workflow](https://about.gitlab.com/blog/solving-complex-challenges-with-gitlab-duo-workflow/) — Learn how a member of the GitLab Customer Success Management team uses agentic AI for real-world problem-solving, including addressing Helm chart limits in the package registry.\n\n- [Get started with GitLab Duo Agentic Chat in the web UI](https://about.gitlab.com/blog/get-started-with-gitlab-duo-agentic-chat-in-the-web-ui/) — Learn about our new GitLab Duo AI feature that automates tasks by breaking down complex problems and executing operations across multiple sources.\n\n- [Custom rules in GitLab Duo Agentic Chat for greater developer efficiency](https://about.gitlab.com/blog/custom-rules-duo-agentic-chat-deep-dive/) — Discover how AI can understand your codebase, follow your conventions, and generate production-ready code with minimal review cycles.\n\n## Learn more with GitLab University\n\n- [Get Started with GitLab Duo coursework](https://university.gitlab.com/pages/ai)\n- [GitLab Duo Enterprise Learning Path](https://university.gitlab.com/learning-paths/gitlab-duo-enterprise-learning-path)\n\n## More AI resources\n\n- [2024 Global DevSecOps Survey: Navigating AI maturity in DevSecOps](https://about.gitlab.com/developer-survey/2024/ai/)\n- [The Role of AI in DevOps](https://about.gitlab.com/topics/devops/the-role-of-ai-in-devops/)\n- [The latest AI/ML articles from GitLab](https://about.gitlab.com/blog/categories/ai-ml/)\n- [GitLab 
Duo](https://about.gitlab.com/gitlab-duo/)",[9,496,737],"2025-06-10",{"slug":858,"featured":91,"template":693},"agentic-ai-guides-and-resources","content:en-us:blog:agentic-ai-guides-and-resources.yml","Agentic Ai Guides And Resources","en-us/blog/agentic-ai-guides-and-resources.yml","en-us/blog/agentic-ai-guides-and-resources",{"_path":864,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":865,"content":871,"config":877,"_id":879,"_type":14,"title":880,"_source":16,"_file":881,"_stem":882,"_extension":19},"/en-us/blog/ai-assisted-code-suggestions",{"title":866,"description":867,"ogTitle":866,"ogDescription":867,"noIndex":6,"ogImage":868,"ogUrl":869,"ogSiteName":706,"ogType":707,"canonicalUrls":869,"schema":870},"How AI-assisted code suggestions will advance DevSecOps","In this second blog in our ‘Future of AI/ML in DevSecOps’ series, find out the impact of AI Assisted code suggestions on the software development lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662840/Blog/Hero%20Images/ai-experiment-stars.png","https://about.gitlab.com/blog/ai-assisted-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How AI-assisted code suggestions will advance DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Neha Khalwadekar\"}],\n        \"datePublished\": \"2023-03-23\",\n      }",{"title":866,"description":867,"authors":872,"heroImage":868,"date":874,"body":875,"category":10,"tags":876},[873],"Neha Khalwadekar","2023-03-23","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). 
Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nArtificial intelligence (AI) and machine learning (ML) have made incredible technological strides and are now poised to [impact the software development process](/topics/devops/the-role-of-ai-in-devops/). As we can see, AI code suggestion proposals have already had a tremendous influence in helping programmers reduce repetitive tasks. AI-assisted code suggestions will enable developers to speed up coding, debugging, refactoring, documentation, and many more tasks, greatly enhancing the software development lifecycle (SDLC).\n\n![Trends adopting AI/ML from GitLab's DevSecOps Survey](https://about.gitlab.com/images/blogimages/ai-ml-adoption-03-2023.png){: .shadow}\n\n## What are suggestions for AI-assisted code?\n\nML techniques are used in AI-assisted code suggestions to assess code and recommend improvements. These recommendations involve modifying the syntax, streamlining the organization of the code, or suggesting more effective methods. By lowering errors, increasing effectiveness, and providing optimization advice, the aim is to assist developers in writing better code faster.\n\n![Animated gif image of code suggestions](https://about.gitlab.com/images/15_9/DemoFastApi.gif){: .shadow}\n\n## How can AI-assisted code suggestions help?\n\nAI-assisted code suggestions can substantially improve the programming experience by reducing errors and helping programmers write code faster, which will help reproduce the much higher production code quality. \n\nHere are some of those SDLC improvements:\n\n- **Decreased errors, increased accuracy.** The capacity of AI-assisted code suggestions to decrease errors and increase accuracy is a critical advantage over manually written code. Developers can prevent common syntax errors, organize their code better, and boost algorithm performance with code suggestions. 
This leads to more dependable and effective code, which produces fewer defects and higher-quality software.\n- **A rise in productivity.** AI-assisted code suggestions can increase developers' efficiency by producing better code faster and more efficiently, saving time and money. Additionally, code suggestions can automate repetitive activities like formatting code, freeing engineers to concentrate on more complex jobs.\n- **Improved collaboration.** AI-assisted code recommendations can improve developer collaboration. Code suggestions can ensure all developers are on the same page by offering consistent coding standards and ideas for improvement. This will lessen the possibility of mistakes and facilitates efficient teamwork.\n- **Faster rollout and iteration.** AI-assisted code recommendations can hasten the deployment and iteration processes. With fewer errors and more effective code, developers can iterate and release updates faster. Code reviews also are faster and more efficient. As a result, enterprises can quickly bring new features to market, providing them with a substantial competitive edge.\n\n### GitLab’s competitive advantages\n\nGitLab’s unified DevSecOps platform enables businesses to deliver software more quickly and efficiently while enhancing security and compliance and maximizing the total return of investment on software development. We anticipate GitLab AI Assisted Code Suggestions will extend and amplify these benefits to improve developer productivity, focus, and innovation without context switching and within a single DevSecOps platform using the GitLab Workflow VS Code extension to get code suggestions as they type. Depending on the user prompts, the extension provides entire code snippets like generating functions or completing the current line. 
Simply pressing the tab key enables you to accept the suggestions.\n\nAs AI technologies advance in sophistication, they will provide more individualized and nuanced ideas, increasing their value to programmers.\n\nThe low-code/no-code development sectors are where AI-assisted code suggestions are anticipated to have substantial impact. As these development platforms spread, we envision bringing AI-powered tools that can offer recommendations and optimizations to simplify the software creation and deployment process for non-technical users on GitLab.com. \n\nThe following are some of the critical jobs we intend to address for our customers with AI Assisted Code Suggestions in the DevSecOps Platform:\n\n- **Code optimization.** How can we drastically reduce the time and effort required for developers to examine and test their code by identifying redundant or inefficient lines of code and suggesting streamlined alternatives?\n- **Automatic bug detection and patching.** How can we analyze sizable codebases to find potential bugs or security flaws and can also offer patches to fix them?\n- **Smart debugging.** How can we assist developers in locating faults precisely and make suggestions for potential fixes? This can result in considerable time and effort savings for developers and quicker bug response.\n- **Continuous integration and deployment.** How can we facilitate continuous integration and deployment by identifying code changes that could cause potential conflicts? This will enable developers to resolve issues quickly and roll out production code faster.\n- **Predictive maintenance.** How can we monitor the performance of the code and find potential issues before they become serious? As a result, developers may proactively address faults, leading to more dependable and stable software.\n- **Programming in natural language.** How can we allow developers to build code via simple natural-language commands? 
This can result in more efficient development and a much shorter learning curve for new developers.\n- **Test case generation and automation.** How can we generate test cases and automate the testing process? In addition to ensuring that software is adequately tested before it is deployed, this can cut down on the time and effort needed for testing.\n- **Smart code completion.** How can we ensure developers write code faster and more precisely, which completes code snippets based on context? This may lead to fewer mistakes and more effective development.\n\nGitLab’s AI Assisted Code Suggestions are available to select Ultimate customers in a closed beta. For early access consideration, Ultimate customers can [submit this form](https://docs.google.com/forms/d/e/1FAIpQLSdSixexFKnIkFGBbmx6XJfBdEBACowhsO-DOm82q4rrAAuYmA/viewform). We’re working towards a wider open beta of this capability in the next few months. \n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[716,738,717,9],{"slug":878,"featured":6,"template":693},"ai-assisted-code-suggestions","content:en-us:blog:ai-assisted-code-suggestions.yml","Ai Assisted Code Suggestions","en-us/blog/ai-assisted-code-suggestions.yml","en-us/blog/ai-assisted-code-suggestions",{"_path":884,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":885,"content":889,"config":897,"_id":899,"_type":14,"title":900,"_source":16,"_file":901,"_stem":902,"_extension":19},"/en-us/blog/ai-in-action-hackathon-celebrating-the-gitlab-innovations",{"config":886,"title":887,"description":888},{"noIndex":6},"AI in Action Hackathon:  Celebrating the GitLab innovations ","Uncover breakthroughs from this AI development showcase that combined Google Cloud, MongoDB, and GitLab.",{"title":887,"description":888,"authors":890,"heroImage":892,"date":893,"body":894,"category":10,"tags":895},[891],"Nick Veenhof","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664458/Blog/Hero%20Images/Gartner_AI_Code_Assistants_Blog_Post_Cover_Image_1800x945.png","2025-08-05","The AI in Action Hackathon offered a compelling opportunity for developers to explore artificial intelligence. Running from May 6 to June 17, 2025, participants developed AI solutions and competed for a $50,000 prize pool. You can find more details about the contest and [explore the projects](https://ai-in-action.devpost.com/project-gallery).\n\nThis hackathon stood out because of a unique collaborative effort, bringing together Google Cloud, MongoDB, and GitLab. The aim was to cultivate an environment for AI development by combining Google Cloud's AI and cloud tools, MongoDB's intelligent data platform for AI, and GitLab's intelligent DevSecOps platform to ship more secure software faster with AI. 
This partnership allowed developers to integrate these powerful tools, reflecting real-world project dynamics.\n\nThis initiative sought to propel the developer community's growth, and collaboratively shape the future of DevSecOps. GitLab's specific focus in this hackathon was to inspire the creation of AI-enabled applications leveraging both GitLab and Google Cloud. Submissions were encouraged to include contributions to GitLab's product or develop functional components for the [GitLab CI/CD Catalog](https://gitlab.com/explore/catalog). \n\nUltimately, the AI in Action Hackathon became a vibrant stage for developer innovation. It ignited fresh ideas and equipped participants with tangible gains, including new skills, impactful projects for their portfolios, and new professional connections.\n## Meet the winners: AI in action with GitLab\n\nCongratulations to all participants, and specifically to the contest winners. Here's a highlight of the projects that stood out for their deep GitLab integration.\n\n**[Pipeline Doctor: Proactive health for your CI/CD](https://devpost.com/software/pipeline-doctor)**\n*\"As a software engineer, I frequently run into failed GitLab pipelines, often accompanied by cryptic and overwhelming logs. Pinpointing the root cause feels like searching for a needle in a haystack. Debugging becomes even more time-consuming when I have to rely on SREs for support.\" - the project's author*\n\nPipeline Doctor addresses this by using AI for advanced root cause analysis, swiftly diagnosing pipeline anomalies. It analyzes logs and changes to pinpoint errors, and could even explain security issues or predict bottlenecks. This means substantial productivity gains for developers, reclaiming time from troubleshooting to focus on new features. It also makes pipelines more reliable, aligning with goals for 80% faster CI builds and 90% less system maintenance. This project signifies a shift from reactive troubleshooting to proactive health monitoring. 
\n\nA truly impressive step towards more resilient pipelines.\n\n**[Agentic CICD: The future of automated DevSecOps](https://devpost.com/software/agentic-cicd)**\n\n*\"What if AI agents could handle most of the DevOps workload?”- the project’s author*\n\nAgentic CICD is set to profoundly elevate DevSecOps practices by automating code reviews, suggesting intelligent fixes, and optimizing testing and deployment decisions. These agents can evaluate real-time metrics, automate releases, and even initiate rollbacks without immediate human intervention, creating a self-improving feedback loop. This approach also enhances security by proactively identifying risks. The advantages for development teams are tangible: increased productivity, consistently higher software quality, and improved operational efficiency, accelerating development cycles and time-to-market. Agentic CICD cultivates a pipeline capable of *self-healing* and *self-optimization*, amplifying developer capabilities by automating routine tasks and providing intelligent insights. \n\nThis project truly showcases the next generation of intelligent automation.\n\n**[Agent Anansi: Your intelligent companion in GitLab](https://devpost.com/software/devgenius)**\n\n*“As someone deeply passionate about DevOps and AI, I was frustrated by the fragmented and reactive nature of traditional CI/CD workflows. While automation is widespread, intelligence is often lacking.“ -  the project's author*\n\nAgent Anansi, a name evoking the clever and resourceful spider from folklore, appears to be a versatile AI agent designed to enhance various GitLab workflows beyond the confines of CI/CD. GitLab's broader vision for AI agents includes systems that mirror familiar team roles and serve as foundational building blocks for highly customized agents. 
This intelligent companion is poised to enhance GitLab workflows by automating repetitive tasks like issue categorization, optimizing search functions, and performing intelligent data analysis. Similar to GitLab Duo's Chat Agent, Anansi could process natural language requests for information or debugging assistance. A compelling application could be an \"AI mentor\" suggesting personalized learning paths. The overall impact on collaboration and efficiency would be substantial, improving developer experience by minimizing manual tasks and reducing context-switching. It would also enhance collaboration by providing instant access to documentation and enabling direct actions through intelligent interaction. Agent Anansi functions as a personalized productivity co-pilot, moving beyond generic tool assistance to a truly personalized experience that increases individual developer efficiency and reduces cognitive load. \n\nA fantastic example of AI making daily development work smarter and more intuitive.\n\n## The power of partnership: Google Cloud, MongoDB, and GitLab fuel innovation\n\nThe AI in Action Hackathon underscored the potency of strategic partnerships in driving innovation. Google Cloud served as a foundational pillar, providing its advanced AI tools, machine learning capabilities, and extensive cloud computing resources as the bedrock for all hackathon projects. MongoDB offered the indispensable intelligent data layer, and GitLab provided the DevSecOps platform essential for building, securing, and deploying these sophisticated AI-enabled applications. Participants were granted access to these powerful tools through free trials or credits, reducing the barriers for experimentation.\n\nThe collaborative synergy among these partners was unmistakable in the multipartner structure of the hackathon. 
This environment allowed participants to explore a wide array of technologies and integration possibilities, enabling them to create innovative projects that addressed real-world problems. \n\n## Getting to know GitLab's Duo Agent Platform\n\nGitLab is reimagining software development, charting a future where humans and AI collaborate seamlessly. [GitLab Duo Agent Platform](https://about.gitlab.com/gitlab-duo/agent-platform/) allows users to build, customize, and connect AI agents to match their workflow. Developers are empowered to focus on strategic, creative challenges, as AI agents adeptly manage routine tasks such as providing project status updates, bug fixes, and code reviews concurrently.\n\n[Duo Agent Platform is now in public beta](https://about.gitlab.com/blog/gitlab-duo-agent-platform-public-beta/) for GitLab Premium and Ultimate customers on GitLab.com and self-managed environments.\n\n[AI agents](https://about.gitlab.com/topics/agentic-ai/) on the platform leverage comprehensive context from your GitLab projects, code, and requirements. They can also interoperate with other applications or data sources for expanded context and actionable assistance. The platform delivers extensible, customizable agentic AI: Users can create and customize agents and agentic flows that understand their specific work processes and organizational needs. Custom rules can be defined in natural language, ensuring agents perform precisely as configured. A catalog for custom skills, agents, and flows is also planned for future release.\n\nDuo Agent Platform is seamlessly integrated into your workflow, available in your IDE (Integrated Development Environment) or GitLab’s web UI. It currently supports VS Code and the JetBrains family of IDEs, with Visual Studio support planned. 
This ability to set custom rules for agents, such as specific formatting for code or adherence to language versions, is poised to accelerate reviews and enable swifter deployment of consistent, secure code.\n\nTo get started, GitLab.com customers need to activate GitLab Duo beta features for their group, while self-managed customers need to enable these features for their GitLab Self-Managed instance. For those who are not yet GitLab customers, [a GitLab Ultimate trial](https://about.gitlab.com/free-trial/devsecops/), including Duo Agent Platform, is available at no cost.\n\n## Join the AI revolution: What's next for developers\n\nThe AI in Action Hackathon vividly showcased the transformative potential of artificial intelligence when applied to real-world software development challenges. For developers inspired by these breakthroughs, the journey into AI-powered DevSecOps has just started. Users are encouraged to explore and harness the power of [GitLab Duo](https://about.gitlab.com/gitlab-duo/), which is engineered to substantially elevate productivity, enhance operational efficiency, and reduce security risks across the software development lifecycle. GitLab Duo offers a suite of integrated features, including intelligent Code Suggestions, an interactive Chat agent, AI-assisted Root Cause Analysis for CI/CD failures, and clear explanations for security vulnerabilities — all directly accessible within the platform.\n\nBeyond utilizing these powerful tools, developers are invited to contribute actively to the vibrant [GitLab community](https://about.gitlab.com/community/). This hackathon is an integral part of GitLab's broader community engagement initiative, which encourages contributions to [GitLab's open source community](https://about.gitlab.com/community/). By contributing, developers can directly shape the platform that millions use to deliver software faster and more securely. 
As a testament to GitLab's commitment to its community, contributors benefit from the very AI-powered tools, such as GitLab Duo, that they help build. Furthermore, GitLab recognizes and rewards community contributions through various programs, including the monthly Notable Contributor initiative and special recognition for Hackathon winners.\n\nThe AI in Action Hackathon showcased how a robust trust infrastructure, combined with emerging AI use cases, is forging a path toward a more trustworthy and efficient digital future. GitLab is dedicated to accelerating the monthly delivery of potent new AI features, with a clear strategic trajectory toward becoming a premier agent orchestration platform. GitLab is poised to empower users to craft, tailor, and disseminate complex agent flows, enabling highly automated and intelligent workflows. The landscape of software development is rapidly transforming, becoming progressively autonomous, adaptive, and AI-driven.\n\nI can’t wait to see what you will build next with GitLab!",[896,9,109],"open source",{"featured":91,"template":693,"slug":898},"ai-in-action-hackathon-celebrating-the-gitlab-innovations","content:en-us:blog:ai-in-action-hackathon-celebrating-the-gitlab-innovations.yml","Ai In Action Hackathon Celebrating The Gitlab Innovations","en-us/blog/ai-in-action-hackathon-celebrating-the-gitlab-innovations.yml","en-us/blog/ai-in-action-hackathon-celebrating-the-gitlab-innovations",{"_path":904,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":905,"content":911,"config":918,"_id":920,"_type":14,"title":921,"_source":16,"_file":922,"_stem":923,"_extension":19},"/en-us/blog/ai-ml-in-devsecops-series",{"title":906,"description":907,"ogTitle":906,"ogDescription":907,"noIndex":6,"ogImage":908,"ogUrl":909,"ogSiteName":706,"ogType":707,"canonicalUrls":909,"schema":910},"AI/ML in DevSecOps Series","This blog series chronicles our journey to integrate AI/ML throughout the software development 
lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682693/Blog/Hero%20Images/ai-ml-in-devsecops-blog-series.png","https://about.gitlab.com/blog/ai-ml-in-devsecops-series","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"AI/ML in DevSecOps Series\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab AI Assisted Group\"}],\n        \"datePublished\": \"2023-04-24\",\n      }",{"title":906,"description":907,"authors":912,"heroImage":908,"date":914,"body":915,"category":10,"tags":916},[913],"GitLab AI Assisted Group","2023-04-24","\n\nOur \"AI/ML in DevSecOps\" series tracks GitLab's journey to build and integrate AI/ML into our DevSecOps platform. Throughout the series, we’ll feature blogs from our product, engineering, and UX teams to showcase how we’re infusing AI/ML into GitLab. So be sure to bookmark this page and follow along.\n\nThis series details [many features introduced during our AI Fireside Chat](/blog/gitlab-ai-assisted-features/) on May 3, 2023.\n\nGet a [full overview of our AI-powered DevSecOps platform](https://about.gitlab.com/solutions/ai/). \n\n1. [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/)\nGitLab users already benefit from a step-function increase in productivity when they adopt our platform: streamlined collaboration, operational efficiencies, and massive acceleration in time to delivery. But by introducing machine learning (ML) and other artificial intelligence (AI) capabilities into the fabric of The DevSecOps Platform feature set, we aim to take those gains to a whole new level.\n\n2. [How AI-assisted code suggestions will advance DevSecOps](/blog/ai-assisted-code-suggestions/)\nAI-assisted code suggestions can substantially improve the programming experience by reducing errors and helping programmers write code faster, which will help reproduce the much higher production code quality.\n\n3. 
[ML experiment: Writing SQL is about to get a lot easier](/blog/ml-experiment-sql/)\nWith the recent advancements in AI and natural language processing, it's now possible for AI models to generate SQL code from simple English language queries. This means that even people without a deep understanding of SQL can generate complex queries to analyze their data. This technology not only improves accessibility but can also save valuable time and effort for data analysts.\n\n4. [ML experiment: Explain this source code](/blog/explain-this-code/)\nDeciphering the source code of a new software project can be a daunting or at least time-consuming task. The code may be poorly documented, or it may be written in a programming language that is unfamiliar to the developer. Even if the developer is familiar with the programming language, the code may be complex and difficult to understand. But what if developers had a helpful tool to figure out very quickly what code was doing? With recent advancements in AI models, it's now possible to have code explained in natural language.\n\n5. [ML experiment: Summarizing issue comments](/blog/summarize-issues/)\nLarge language models (LLMs) power generative AI solutions by using deep learning algorithms to analyze vast amounts of natural language data. These models are trained on massive datasets to develop an understanding of language patterns and context. Once trained, the models can generate new text that mimics human language. In a rapid prototype, our own Alexandru Croitor, Senior Backend Engineer, and Nicolas Dunlar, Senior Frontend Engineer for our Plan stage, leverage generative AI LLMs to power comment summarization within GitLab's issues.\n\n6. [ML experiment: Summarize merge request changes](/blog/merge-request-changes-summary-ai/)\nMerge requests are the central point of collaboration for code changes in GitLab. They often contain a variety of changes across many files and services within a project. 
Often, merge requests communicate the intent of the change as it relates to an issue being resolved, but they might not describe what was changed to achieve that. As review cycles progress, the current state of the merge request can become out of sync with the realities of the proposed changes and keeping people informed. We believe that we can leverage AI and large language models (LLMs) to help provide relevant summaries of a merge request and its proposed changes, so reviewers and authors can spend more time discussing changes and less time keeping descriptions updated.\n\n7. [ML experiment: Generate tests for code changes](/blog/merge-request-suggest-a-test/)\nProposing changes and new features via merge requests is great, but what about the tests? Sometimes, tests can be the hardest part of any code change you make. We are leveraging generative AI to enable developers to create tests for merge request changes, helping reduce the laborious but important task of writing tests and increasing test coverage. \n\n8. [ML experiment: Explain this vulnerability](/blog/explain-this-vulnerability/)\nSecurity vulnerabilities aren't always easy to understand, especially for developers without experience or training with cybersecurity. We're leveraging AI to help developers understand security vulnerabilities and even get guidance on how to resolve them.\n\n9. [ML experiment: Use a chatbot to answer how-to questions](/blog/gitlab-chat-ai/)\nLarge language models (LLMs) have changed the way everyday people interact with large volumes of text. We thought, why not train an LLM on GitLab's extensive documentation to help users quickly answer natural language questions. Gone are the days of endless searching through vast documentation sites.\n\n10. [Track ML model experiments with new GitLab MLFlow integration](/blog/track-machine-learning-model-experiments/)\nModel experiments allow data scientists to track different variations of machine learning models directly on GitLab.\n\n11. 
[Code Suggestions available to all GitLab tiers while in Beta](/blog/code-suggestions-for-all-during-beta/)\nWe've made code suggestions available to all plans for free during Beta. Also, learn about recent updates to Code Suggestions.\n\n12. [ML experiment: Summarize my merge request review](/blog/summarize-my-merge-request-review/) \nLearn how GitLab is experimenting with ML-powered merge request review summaries.\n\n13. [How Code Suggestions can supercharge developers' daily productivity](/blog/code-suggestions-improves-developer-productivity/)\nLearn how you can use GitLab Code Suggestions to accelerate your development.\n\n14. [ML experiment: Extending Code Suggestions to more development environments](/blog/extending-code-suggestions/)\nLearn how we're expanding Code Suggestions to support Visual Studio, \nJetBrains IDEs, Neovim, and other development environments.\n\n15. [Train and deploy AI models with GitLab and Google Vertex AI](/blog/training-and-deploying-ai-models-with-gitlab-and-vertex-ai/)\nA demonstration of GitLab's DevSecOps capabilities combined with Vertex AI's scalable ML platform, designed with the aim of rapid and secure AI deployments. \n\n16. [Self-managed support for Code Suggestions (Beta)](/blog/self-managed-support-for-code-suggestions/)\nOne of our most popular customer requests – self-managed support for Code Suggestions (Beta) – is expected to ship soon in GitLab 16.1. Learn how it will work.\n\n17. [Meet GitLab Duo - The suite of AI capabilities powering your workflows](/blog/meet-gitlab-duo-the-suite-of-ai-capabilities/)\nLearn about GitLab Duo, an expanding toolbox of features integrated directly into the GitLab platform to assist DevSecOps teams.\n\n18. [GitLab for Visual Studio, including code suggestions, available in Beta](/blog/gitlab-visual-studio-extension/)\nGitLab for Visual Studio extension supports GitLab Duo code suggestions for both GitLab SaaS and GitLab self-managed.\n\n19. 
[Empower ModelOps and HPC workloads with GPU-enabled runners integrated with CI/CD](/blog/empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners)\nLearn how to leverage our GitLab-hosted GPU-enabled runners for ModelOps and high-performance computing workloads.\n\n20. [GitLab Duo Code Suggestions for JetBrains and Neovim](/blog/gitlab-jetbrains-neovim-plugins/) GitLab plugins for JetBrains IDEs and Neovim are now available in Beta,\nbringing GitLab Duo Code Suggestions to more software development environments.\n\nWant to learn even more about AI/ML? Check out our [AI Assisted Group direction page](/direction/modelops/ai_assisted/) and more [AI/ML articles](/blog/tags.html#AI/ML).\n",[917,738,717,9],"DevOps",{"slug":919,"featured":6,"template":693},"ai-ml-in-devsecops-series","content:en-us:blog:ai-ml-in-devsecops-series.yml","Ai Ml In Devsecops Series","en-us/blog/ai-ml-in-devsecops-series.yml","en-us/blog/ai-ml-in-devsecops-series",{"_path":925,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":926,"content":932,"config":938,"_id":940,"_type":14,"title":941,"_source":16,"_file":942,"_stem":943,"_extension":19},"/en-us/blog/ai-powered-growth-transform-every-stage-of-software-delivery",{"title":927,"description":928,"ogTitle":927,"ogDescription":928,"noIndex":6,"ogImage":929,"ogUrl":930,"ogSiteName":706,"ogType":707,"canonicalUrls":930,"schema":931},"AI-powered growth: Transform every stage of software delivery","Find out how the latest developments in the GitLab AI-powered DevSecOps Platform boost efficiency throughout the software development lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669095/Blog/Hero%20Images/gitlabduo.png","https://about.gitlab.com/blog/ai-powered-growth-transform-every-stage-of-software-delivery","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"AI-powered growth: Transform every stage of software delivery\",\n        
\"author\": [{\"@type\":\"Person\",\"name\":\"Laurena Alves\"}],\n        \"datePublished\": \"2024-01-29\",\n      }",{"title":927,"description":928,"authors":933,"heroImage":929,"date":935,"body":936,"category":10,"tags":937},[934],"Laurena Alves","2024-01-29","It’s clear artificial intelligence (AI) is top of mind for organizations and their software development teams. With developers and security teams forced to focus so much of their time on repetitive, mundane tasks, of course they’re looking for ways to ease that work and carve out more time in their schedules.\n\nAccording to the [GitLab 2023 State of AI in Software Development report](https://about.gitlab.com/developer-survey/), developers noted spending 25% of their total work time writing code, with the rest spent improving existing code, understanding existing code, testing, maintaining code, and identifying and mitigating security vulnerabilities. As organizations incorporate AI into their DevSecOps processes, they'll need to adopt AI solutions — such as vulnerability explanations, code change summaries, automated tests, and more — that will help them introduce efficiencies and boost productivity and collaboration across the entire software development lifecycle (SDLC).\n\nWe know AI, with the efficiencies it brings across the entire SDLC, has a role to play in saving developers’ time, improving their jobs, and helping speed the production of secure software. Over the past few months, GitLab has released a set of AI-powered features that will help you do just that. \n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. 
[Watch today!](https://about.gitlab.com/seventeen/)\n\n### GitLab Duo Pro: Unleash the power of AI\n\nFor Ultimate and Premium customers, [GitLab Duo Pro](https://about.gitlab.com/pricing/) is a new add-on package that brings together Code Suggestions, GitLab Duo Chat (Beta), and organizational control capabilities, ensuring that teams can take advantage of AI exactly where they need it. \n\nCode Suggestions helps automate routine coding tasks, allowing developers to focus on delivering higher-quality software at speed, while Chat helps teams write and understand code faster, get up to speed on the status of projects, and quickly learn GitLab. And the organizational controls included in GitLab Duo Pro give organizations fine-grained control over which users have access to AI, ensuring alignment with team workflows and security requirements. At an introductory price of $9 USD per user per month (valid until January 31), and then $19 USD per user per month after February 1, GitLab Duo Pro is an investment in developer efficiency and organizational governance. [Contact us today to get started](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/).\n\n### Remove roadblocks and ship faster\n\nDevelopers struggle with repetitive tasks slowing down their workflow and hindering innovation. Manual code reviews and testing take additional time, inhibiting release cycles. And inconsistent quality standards raise security alarms and can create potential vulnerabilities. This all stifles productivity and reduces developer happiness. \n\nWith the recent general availability of [Code Suggestions](https://about.gitlab.com/blog/gitlab-duo-code-suggestions-is-generally-available/), developers of all skill levels now have a virtual assistant that makes building software faster, more efficient, and more secure. 
The AI-powered feature now has enhancements like [Code Generation](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/), which generates code based on natural language comment blocks, and [Repository X-ray](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/repository_xray.html), which improves the accuracy and relevance of code recommendations. \n\nCode Suggestions is your virtual pair programming buddy that can handle repetitive tasks, giving developers more time to focus on creating innovative products. Acting as a coding sidekick, this AI capability can automatically generate lines of code, complete functions, and even extend unfamiliar codebases. Code Suggestions supports 15 languages, including C++, C#, Go, Java, JavaScript, Python, PHP, Ruby, Rust, Scala, Kotlin, and TypeScript. It also integrates seamlessly into many popular IDEs like VS Code, Visual Studio, JetBrains’ suite of IDEs, and Neovim.\n\n### Gain the power of an AI assistant\n\nWhile Code Suggestions takes the spotlight, it's just one capability of [GitLab Duo](https://about.gitlab.com/gitlab-duo/), which is a full suite of [15 AI-assisted features](https://docs.gitlab.com/ee/user/ai_features.html) that go beyond code generation, catering to every stage of software development — from code generation and testing to security analysis and documentation. [GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) is the foundational technology that connects users with our broader set of AI-powered GitLab Duo features. With Chat, users have a personal AI assistant that helps them answer questions, understand code, and even generate tests — all within the same IDE where development work happens. Together, these capabilities form a cohesive ecosystem. \n\nThese features are critical because they’ll help prevent bottlenecks from forming downstream as developers, who are getting a productivity boost from Code Suggestions, create more code faster. 
GitLab Duo offers targeted solutions, like Vulnerability Explanation and Merge Request Summary, to improve software quality and keep workflows moving.\n\nImportantly, GitLab prioritizes privacy and transparency in its AI approach. Customer code is not used for training, ensuring organizations have complete data ownership and control.\n\n> Live demo! Discover the future of AI-driven software development at our GitLab 17 virtual launch event. [Register today](https://about.gitlab.com/seventeen/)!\n",[9,496,738],{"slug":939,"featured":91,"template":693},"ai-powered-growth-transform-every-stage-of-software-delivery","content:en-us:blog:ai-powered-growth-transform-every-stage-of-software-delivery.yml","Ai Powered Growth Transform Every Stage Of Software Delivery","en-us/blog/ai-powered-growth-transform-every-stage-of-software-delivery.yml","en-us/blog/ai-powered-growth-transform-every-stage-of-software-delivery",{"_path":945,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":946,"content":952,"config":958,"_id":960,"_type":14,"title":961,"_source":16,"_file":962,"_stem":963,"_extension":19},"/en-us/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow",{"title":947,"description":948,"ogTitle":947,"ogDescription":948,"noIndex":6,"ogImage":949,"ogUrl":950,"ogSiteName":706,"ogType":707,"canonicalUrls":950,"schema":951},"Automate tedious coding tasks with GitLab Duo Workflow","See how agentic AI can reduce time spent on repetitive tasks, freeing you up to focus on developing innovative solutions and shipping the next big thing.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662465/Blog/Hero%20Images/GitLab_Duo_Workflow_Unified_Data_Store__1_.png","https://about.gitlab.com/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automate tedious coding tasks with GitLab Duo Workflow\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Jeff Park\"}],\n        \"datePublished\": \"2025-05-06\",\n      }",{"title":947,"description":948,"authors":953,"heroImage":949,"date":955,"body":956,"category":10,"tags":957},[954],"Jeff Park","2025-05-06","Working with large codebases often means spending significant time on repetitive tasks that, while necessary, don't really push your projects forward. The good news is that these tasks are great candidates to be completed with AI. Reducing the time spent on them will free you up to work on more important problems that you’re actually excited to tackle. With GitLab Duo Workflow, the time spent on these tasks will go from hours to minutes.\n\n[Duo Workflow](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/) is a powerful new agentic solution, currently in private beta, that lives in VS Code and is designed to help you complete complex development tasks. While many AI coding assistants are focused on helping developers write code, Duo Workflow understands your project structure, reads your files, and can make coordinated changes across your entire codebase.\n\nI created a demonstration that showcases how Duo Workflow can transform a tedious coding task into a streamlined process that saves you time and mental energy.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1081627484?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Automate tedious coding tasks with GitLab Duo Workflow\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## The challenge: Implementing a new lint rule\n\nIn this demo, we tackle a common scenario that many developers face: implementing a new lint rule and then 
updating multiple files across the codebase to comply with this rule. The specific issue involves validation errors occurring in several project files that need to be addressed consistently.\n\nRather than manually identifying and modifying each affected file one by one – a process that could take hours depending on the size of your codebase – we'll see how Duo Workflow can:\n\n1. Read and understand the details from an issue\n2. Analyze the project structure to identify affected files\n3. Create a comprehensive plan to implement the necessary changes\n4. Draft a new lint rule to prevent future occurrences\n5. Make consistent code changes across all relevant files\n6. Stage the changes for your review before any commits are made\n\nA simple prompt initiates the process:\n\n\"Read through issue #1 in this project and submit code changes to resolve it. Be sure to look at each tool file and make all appropriate changes.\"\n\nFrom there, Duo Workflow takes over – reading the issue, analyzing the files, creating a plan, and implementing the solution – all while keeping me informed of its progress and reasoning.\n\n## Why this matters for your development process\n\nWhat's particularly powerful about Duo Workflow is how it maintains awareness of this wider context throughout the entire process. 
It's not just making text replacements based on a large language model's training data – it's understanding the code, making intelligent decisions, and proposing a complete solution that you maintain full control over.\n\nThis approach offers several key benefits:\n\n* **Consistency in implementation:** Apply changes uniformly across files\n* **Time savings:** Focus your energy on creative problem-solving rather than repetitive tasks\n* **Reduced context switching:** Complete complex tasks without leaving your IDE\n* **Keeping a human in the loop:** Review all proposed modifications before committing\n\n## What's next\n\nGitLab Duo Workflow is part of our work to bring AI-powered capabilities to every stage of the software development lifecycle. While this demo focuses on code editing, the same approach can be applied to various development tasks:\n\n* Implementing new features based on issue descriptions\n* Fixing bugs with comprehensive test coverage\n* Refactoring legacy code to modern standards\n* Creating documentation from codebase analysis\n\nWe believe that by automating repetitive tasks, Duo Workflow helps you focus on what matters most – solving interesting problems and creating innovative solutions for your users.\n\n> GitLab Duo Workflow is currently available in private beta for GitLab Ultimate customers. 
[Sign up for the waitlist today!](https://about.gitlab.com/gitlab-duo/agent-platform/)\n\n## Learn more\n- [Use GitLab Duo Workflow to improve application quality assurance](https://about.gitlab.com/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance/)\n- [Solving complex challenges with GitLab Duo Workflow](https://about.gitlab.com/blog/solving-complex-challenges-with-gitlab-duo-workflow/)\n- [GitLab Duo Workflow: Enterprise visibility and control for agentic AI](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/)\n- [Emerging agentic AI trends reshaping software development](https://about.gitlab.com/the-source/ai/emerging-agentic-ai-trends-reshaping-software-development/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n",[9,496,717,738,737,759],{"slug":959,"featured":91,"template":693},"automate-tedious-coding-tasks-with-gitlab-duo-workflow","content:en-us:blog:automate-tedious-coding-tasks-with-gitlab-duo-workflow.yml","Automate Tedious Coding Tasks With Gitlab Duo Workflow","en-us/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow.yml","en-us/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow",{"_path":965,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":966,"content":972,"config":979,"_id":981,"_type":14,"title":982,"_source":16,"_file":983,"_stem":984,"_extension":19},"/en-us/blog/automating-with-gitlab-duo-part-1-generating-tests",{"title":967,"description":968,"ogTitle":967,"ogDescription":968,"noIndex":6,"ogImage":969,"ogUrl":970,"ogSiteName":706,"ogType":707,"canonicalUrls":970,"schema":971},"Automating with GitLab Duo, Part 1: Generating tests","Learn how we used the AI-driven DevSecOps platform to generate automated tests and improve our development speed and 
quality.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097480/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%284%29_3LZkiDjHLjhqEkvOvBsVKp_1750097480784.png","https://about.gitlab.com/blog/automating-with-gitlab-duo-part-1-generating-tests","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automating with GitLab Duo, Part 1: Generating tests\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Byron Boots\"}],\n        \"datePublished\": \"2024-12-02\",\n      }",{"title":967,"description":968,"authors":973,"heroImage":969,"date":975,"body":976,"category":10,"tags":977},[974],"Byron Boots","2024-12-02","Automated testing is time-consuming and can feel like it’s not moving a project forward. However, as many developers have likely experienced, automated testing provides an overall positive return on investment. In building a custom module (we'll call it gitlab-helper for this article), this was particularly true.\n\nOur initial development focused on migrating tried and used functionality from existing scripts to a new module whose sole purpose was to serve as a baseline for future functionality. Although existing scripts lacked automated testing, their consistent usage was strong anecdotal evidence the functionality worked as expected.\n\nOur objective was to deliver a more mature solution to this problem, so automated testing became a necessity. This introduced the challenge of building efficiently, while balancing the time to test and ensure a robust product; and with a total of three team members, this was no small bottleneck. Therefore, the team decided to take advantage of [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI capabilities, for test generation, improving speed and quality of the delivered product.\n\nIn this three-part series on automating with GitLab Duo, we will cover:\n\n1. 
How we used GitLab Duo to generate tests for our code  \n2. How we worked interactively with GitLab Duo for more complex situations  \n3. The results we were able to achieve (Spoiler: 1 developer + GitLab Duo = 84% coverage in 2 days)\n\n## Using GitLab Duo to generate tests for code\n\nWhile functionality is available across tools, this article will cover using GitLab Duo in VS Code, with the [GitLab Workflow extension for VS Code](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow) to generate tests. Links to other GitLab Duo options are available in the [references](#references) below.\n\n### Install and enable GitLab Duo\n\nAs a prerequisite to using GitLab Duo, we ensured we had a GitLab Duo-enabled account. If you don't have GitLab Duo, you can [sign up for a free trial](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial).\n\nTo use GitLab Duo Chat in VS Code, we followed the [instructions for installation](https://docs.gitlab.com/ee/user/gitlab_duo_chat/#use-gitlab-duo-chat-in-vs-code). Then, we were able to see the GitLab Duo Chat extension on the sidebar and open the Chat window.\n\n![Ask a question window](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097489/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097488918.png)\n\n### Generate tests with Chat\n\ngitlab-helper is a custom module built for standardizing interaction with the GitLab API across the team's work and extends other library functionalities to simplify development and scripting work. 
Once a method or feature was migrated to gitlab-helper and appeared to be implemented appropriately, the process to generate tests for it was simple:\n- Select the method, class, or entire file in the IDE.\n- Right-click on the selected code.\n- Under **GitLab Duo Chat**, select **Generate tests**.\n\n![Sequence to generate tests, including drop-down for generate tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097489/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097488919.png)\n\nWithin a few seconds, tests were generated and presented in the GitLab Duo Chat window. These tests can be reviewed and or added to the codebase, via copy/paste, into existing or new test files. As is the case with most natural language processing generations today, particularly around context, some of the initial tests created by GitLab Duo failed, thus requiring finetuning (for instance, when dealing with nested dependencies).\n\n> **Pro tip:** GitLab Duo does not auto-create files to add generated tests to. We found it was helpful to create new test files and add a `# Tests Generated by Duo` comment at the top of them and suffix them with `_duo.py` to indicate where the tests came from.\n\nGitLab Duo provided a great starting point for building out gitlab-helper’s automated testing and greatly improved test writing efficiency and code coverage, speeding up the development process substantially. 
Alongside GitLab Duo, numerous iterations of valuable tests were introduced into the gitlab-helper module with human oversight.\n\nRead the next installment in this series where we share [what we learned while using GitLab Duo for generating automated tests](https://about.gitlab.com/blog/automating-with-gitlab-duo-part-2-complex-testing/) and working interactively with AI for more complex situations.\n\n## References\n\nThere’s more than one way to use GitLab Duo to generate tests, check out the other options below:\n\n* The GitLab UI  \n* [The GitLab Web IDE (VS Code in the cloud)](https://docs.gitlab.com/ee/user/project/web_ide/index.html)  \n* VS Code, with the [GitLab Workflow extension for VS Code](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow)  \n* JetBrains IDEs, with the [GitLab Duo Plugin for JetBrains](https://plugins.jetbrains.com/plugin/22325-gitlab-duo)  \n* Visual Studio for Windows, with the [GitLab Extension for Visual Studio](https://marketplace.visualstudio.com/items?itemName=GitLab.GitLabExtensionForVisualStudio)\n",[9,737,978,496,717],"testing",{"slug":980,"featured":6,"template":693},"automating-with-gitlab-duo-part-1-generating-tests","content:en-us:blog:automating-with-gitlab-duo-part-1-generating-tests.yml","Automating With Gitlab Duo Part 1 Generating Tests","en-us/blog/automating-with-gitlab-duo-part-1-generating-tests.yml","en-us/blog/automating-with-gitlab-duo-part-1-generating-tests",{"_path":986,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":987,"content":993,"config":998,"_id":1000,"_type":14,"title":1001,"_source":16,"_file":1002,"_stem":1003,"_extension":19},"/en-us/blog/automating-with-gitlab-duo-part-2-complex-testing",{"title":988,"description":989,"ogTitle":988,"ogDescription":989,"noIndex":6,"ogImage":990,"ogUrl":991,"ogSiteName":706,"ogType":707,"canonicalUrls":991,"schema":992},"Automating with GitLab Duo, Part 2: Complex testing","Find out how the GitLab team addressed more complex testing 
situations using GitLab Duo's AI capabilities, including ensuring that code testing followed standards.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099243/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%284%29_3LZkiDjHLjhqEkvOvBsVKp_1750099243011.png","https://about.gitlab.com/blog/automating-with-gitlab-duo-part-2-complex-testing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automating with GitLab Duo, Part 2: Complex testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Byron Boots\"}],\n        \"datePublished\": \"2024-12-10\",\n      }",{"title":988,"description":989,"authors":994,"heroImage":990,"date":995,"body":996,"category":10,"tags":997},[974],"2024-12-10","The first part of our three-part series on [test generation with GitLab Duo](https://about.gitlab.com/blog/automating-with-gitlab-duo-part-1-generating-tests/) focused on how to automate code testing. Now, we will share the lessons we learned while using AI for test generation.\n\n## Situations we encountered and how we handled them\n\nOverall, we were pleased with the results using [GitLab Duo](https://about.gitlab.com/gitlab-duo/) to generate tests on our code. As is the case with any language generation, some cases required minor adjustments such as fixing import paths or editing contents in datasets. For the more complex cases, we had to remember that AI solutions often lack context. Here's how we handled the more complex testing situations with GitLab Duo.\n\n### Updating existing test cases\n\nAs is often the case when developing a software product, we encountered instances that required updates to existing tests. Rather than manually making adjustments to a full test suite for a common issue, we took full advantage of the GitLab Duo Chat window in VS Code. 
For example, to refactor tests, we used the Chat prompt “Please update the provided tests to use unittest rather than pytest” followed by pasting in the tests we wanted GitLab Duo to update.\n\n![Automated test generation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099252/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099252303.png)\n\n\u003Cbr>\u003C/br>\n\n![Chat prompt requesting use of unittest rather than pytest](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099252/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099252304.png)\n\n**Note:** We copy-and-pasted GitLab Duo's recommendations into our code.\n\n### Creating tests for legacy code\n\nCreating tests for legacy code we knew worked was another challenging situation we encountered. In such circumstances, it was valuable to provide error snippets alongside failing tests and ask GitLab Duo to provide new tests. A full copy-and-paste from the terminal window of noted failures and errors to Chat, along with a request to “Please explain and fix this failing test” or similar prompts, yielded a summary of the issues the test was encountering as well as a new test addressing the problem. We did find this sometimes required multiple rounds of refactoring as new test failures were identified. However, the efficiency of GitLab Duo to provide various refactored solutions was fast and a net positive on team and developer efficiency.\n\n### Working with complex or abstracted code\n\nIn other instances, the modularization or complexity of our code led to variance in GitLab Duo’s results. For instance, when generating tests, GitLab Duo sometimes generated a series of passing and failing tests caused by differences in testing approach (e.g. usage of Mock and which objects were mocked). 
We provided GitLab Duo its own example of a passing test and asked it to modify individual tests one at a time to match the style of the passing tests to maintain consistency. We also would provide GitLab Duo a file of functioning tests for a similar object or task so it could mirror the structure.\n\n### Ensuring generated code follows our standards\n\nWhile developing a Python module, GitLab Duo generated many tests using Mock and often they required refactoring, particularly around naming standardization. In such cases, we could leverage GitLab Duo Chat to refactor tests with instructions as to which specific test components to update. Prompting GitLab Duo for these changes was immensely faster than refactoring tests individually, as we had previousy done.\n\n### Addressing uncovered test cases\n\nGitLab Duo generated tests for additional test cases the team had not previously considered, thus increasing coverage. Luckily, we could use GitLab Duo to quickly and efficiently address these edge cases and expand testing coverage, which is a key value-add for our team to build quickly and ensure a robust product.\n\n## What we learned\n\nHere are few key lessons that have been important to our success with GitLab Duo:\n\n* **Fast and efficient for rapid development and iteration -** GitLab Duo’s role in generating automated tests has been a key accelerator in development for our team and allowed us to work faster and with greater confidence in our changes.\n* **Important to use appropriate prompts -** When using GitLab Duo for our use case, we touched on a key topic for machine learning optimization: prompt engineering. Sometimes we needed to modify our question by just a few keywords to lead to the ideal generated answer. 
\n* **Need understanding of underlying frameworks and code -** When it comes to any AI-generated code that makes it into a product, even if only as testing, it’s critical that we understand how the code functions so we can adequately debug as well as request informed changes.\n* **Need understanding of desired end state and standards -** Similar to following coding standards for formatting and library usage while developing without AI, it’s important to maintain the vision of what the intended outcomes look like and what standards are being adhered to when using AI. GitLab Duo needs the context to understand code standards, so it’s critical for team members using GitLab Duo to provide adequate oversight of its outputs to ensure quality and other expectations are met.\n* **GitLab Duo is not a replacement for all tests -** While we use GitLab Duo significantly for generating automated tests, it does not replace our other tests and human oversight. Functional tests, integration tests, and more still serve a valuable place in the QA process and overall software development lifecycle.\n\nIn our next article in this series, we’ll cover [a test we ran to validate the impact of GitLab Duo on our team’s automated testing](https://about.gitlab.com/blog/automating-with-gitlab-duo-part-3-validating-testing/) and discuss the impressive results we have achieved thus far.",[9,978,496],{"slug":999,"featured":6,"template":693},"automating-with-gitlab-duo-part-2-complex-testing","content:en-us:blog:automating-with-gitlab-duo-part-2-complex-testing.yml","Automating With Gitlab Duo Part 2 Complex 
Testing","en-us/blog/automating-with-gitlab-duo-part-2-complex-testing.yml","en-us/blog/automating-with-gitlab-duo-part-2-complex-testing",{"_path":1005,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1006,"content":1012,"config":1017,"_id":1019,"_type":14,"title":1020,"_source":16,"_file":1021,"_stem":1022,"_extension":19},"/en-us/blog/automating-with-gitlab-duo-part-3-validating-testing",{"title":1007,"description":1008,"ogTitle":1007,"ogDescription":1008,"noIndex":6,"ogImage":1009,"ogUrl":1010,"ogSiteName":706,"ogType":707,"canonicalUrls":1010,"schema":1011},"Automating with GitLab Duo, Part 3: Validating testing","Discover what test we ran to validate the impact of GitLab Duo on our team’s automated testing – and the results we achieved.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097447/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%284%29_3LZkiDjHLjhqEkvOvBsVKp_1750097447404.png","https://about.gitlab.com/blog/automating-with-gitlab-duo-part-3-validating-testing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automating with GitLab Duo, Part 3: Validating testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Byron Boots\"}],\n        \"datePublished\": \"2024-12-17\",\n      }",{"title":1007,"description":1008,"authors":1013,"heroImage":1009,"date":1014,"body":1015,"category":10,"tags":1016},[974],"2024-12-17","In previous entries in this series, we covered [how we used GitLab Duo to generate tests for our code](https://about.gitlab.com/blog/automating-with-gitlab-duo-part-1-generating-tests/) as well as [what we learned while using GitLab Duo for generating automated tests](https://about.gitlab.com/blog/automating-with-gitlab-duo-part-2-complex-testing/). We also shared some of the ways we addressed making changes to GitLab Duo generated tests. 
This last article in the series will cover a test we ran to validate the impact of GitLab Duo on our team’s automated testing and discuss the impressive results we have achieved thus far.\n\n### Validation testing results\n\nTo validate that our usage of GitLab Duo to generate tests was adding value the way we expected, we challenged ourselves and GitLab Duo to replace and increase test coverage. The team removed all previously written tests to get our test coverage to 0% and then methodically went through the repository and created new test files to store GitLab Duo-generated tests.\n\nFrom this starting point, the team followed the steps outlined in [the first blog](https://about.gitlab.com/blog/automating-with-gitlab-duo-part-1-generating-tests/) to generate tests. Tests and test files were unmodified by humans to provide a stable control group and a `Tests Generated by Duo` comment at the top of them were suffixed by `duo.py` to indicate where the tests came from.\n\nAll iterations of the tests were only done through interactions with GitLab Duo through the `Generate Tests` and GitLab Duo Chat window as outlined in [the second blog in the series](https://about.gitlab.com/blog/automating-with-gitlab-duo-part-2-complex-testing/). As we shared, we requested GitLab Duo to make updates based on encountered errors, test failures, and example code snippets for GitLab Duo to use as added context. \n\nAt all times. when testing with GitLab Duo, we were running tests and coverage reports so we could see if our GitLab Duo-generated tests were increasing testing coverage and adding value as we expected. Taking advantage of [GitLab's test coverage visualization](https://docs.gitlab.com/ee/ci/testing/test_coverage_visualization/), we were able to continuously monitor the results of our work.\n\nUltimately, after using GitLab Duo to regenerate tests for code previously covered through our mostly manual testing, we were able to achieve test coverage of 84%. 
This was a great accomplishment for the team because:\n\n1. It was a significant improvement from prior coverage, which was at 74%.  \n2. It took approximately two days by one engineer to achieve 84%, compared to the approximately four weeks across multiple engineers that the 74% had taken.\n\nSince this experiment, the team has increased coverage even further to 89% with the help of GitLab Duo, while continuing to introduce new features.\n\n![image of achievements](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097456/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097456771.png)\n\nUsing GitLab Duo allowed for increased testing efficiency and coverage, and also allowed developers with lower context around existing code to write valuable tests, quickly. This has resulted in increased confidence on the team to develop new features without worrying about introducing errors.\n\n> If you'd like to [try GitLab Duo](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/), sign up for a free trial today!\n",[9,978,496,717],{"slug":1018,"featured":6,"template":693},"automating-with-gitlab-duo-part-3-validating-testing","content:en-us:blog:automating-with-gitlab-duo-part-3-validating-testing.yml","Automating With Gitlab Duo Part 3 Validating Testing","en-us/blog/automating-with-gitlab-duo-part-3-validating-testing.yml","en-us/blog/automating-with-gitlab-duo-part-3-validating-testing",{"_path":1024,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1025,"content":1031,"config":1038,"_id":1040,"_type":14,"title":1041,"_source":16,"_file":1042,"_stem":1043,"_extension":19},"/en-us/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow",{"title":1026,"description":1027,"ogTitle":1026,"ogDescription":1027,"noIndex":6,"ogImage":1028,"ogUrl":1029,"ogSiteName":706,"ogType":707,"canonicalUrls":1029,"schema":1030},"Build an ML app pipeline with GitLab Model Registry using MLflow","Learn how to manage your ML apps entirely through 
GitLab with this tutorial. Also discover the role machine learning operations, or MLOps, plays in automating the DevSecOps lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660151/Blog/Hero%20Images/blog-image-template-1800x945__26_.png","https://about.gitlab.com/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Build an ML app pipeline with GitLab Model Registry using MLflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gufran Yeşilyurt, OBSS\"},{\"@type\":\"Person\",\"name\":\"Péter Bozsó\"}],\n        \"datePublished\": \"2024-09-17\",\n      }",{"title":1026,"description":1027,"authors":1032,"heroImage":1028,"date":1035,"body":1036,"category":10,"tags":1037},[1033,1034],"Gufran Yeşilyurt, OBSS","Péter Bozsó","2024-09-17","__*Editor's note: From time to time, we invite members of our partner\ncommunity to contribute to the GitLab Blog. Thanks to Gufran Yeşilyurt, a\nDevOps consultant at OBSS Technology, for co-creating with us.*__\n\n\nThis tutorial will walk you through setting up an MLOps pipeline with GitLab\nModel Registry, utilizing MLflow. This will be a great starting point to\nmanage your ML apps entirely through GitLab. But first, it is crucial to\nunderstand why we need MLOps and what GitLab offers.\n\n\n[MLOps](https://about.gitlab.com/direction/modelops/mlops/#overview), or\nmachine learning operations, is a critical practice for managing and\nautomating the lifecycle of machine learning models, from development to\ndeployment and maintenance. 
Its importance lies in addressing the complexity\nand dynamism of machine learning workflows, which involve not just software\ndevelopment but also data management, model training, testing, deployment,\nand continuous monitoring.\n\n\nMLOps ensures that models are reproducible, scalable, and maintainable,\nfacilitating collaboration between data scientists, machine learning\nengineers, and operations teams. By incorporating MLOps, organizations can\nstreamline the deployment process, reduce time to market, and improve the\nreliability and performance of their machine learning applications.\n\n\nThe necessity of MLOps arises from the unique challenges posed by machine\nlearning projects. Unlike traditional software development, machine learning\ninvolves handling large datasets, experimenting with various models, and\ncontinuously updating models based on new data and feedback.\n\n\nWithout proper operations, managing these aspects becomes cumbersome,\nleading to potential issues like model drift, where the model's performance\ndegrades over time due to changes in the underlying data. MLOps provides a\nstructured approach to monitor and manage these changes, ensuring that\nmodels remain accurate and effective. Moreover, it introduces automation in\nvarious stages, such as data preprocessing, model training, and deployment,\nthereby reducing manual errors and enhancing efficiency.\n\n\nGitLab's features play a pivotal role in implementing MLOps effectively.\nGitLab provides an integrated platform that combines source code management,\n[CI/CD pipelines](https://about.gitlab.com/topics/ci-cd/), tracking and\ncollaboration tools, making it ideal for managing machine learning projects.\n\n\nWith GitLab, teams can leverage version control to track changes in both\ncode and data, ensuring reproducibility and transparency. 
The CI/CD\npipelines in GitLab automate the testing and deployment of machine learning\nmodels, allowing for continuous integration and continuous delivery. This\nautomation not only speeds up the deployment process but also ensures\nconsistency and reliability in the models being deployed. \n\n\nAdditionally, GitLab's collaboration features, such as merge requests and\ncode reviews, facilitate better communication and coordination among team\nmembers, ensuring that everyone is aligned and any issues are promptly\naddressed.\n\n\nPrerequisites:\n\n- basic knowledge of GitLab pipelines\n\n- basic knowledge of MLflow\n\n- a Kubernetes cluster\n\n- Dockerfile\n\n\nThis tutorial includes instructions to:\n\n- [Set up environment variables of\nMLflow](#set-up-environment-variables-of-mlflow)\n\n- [Train and log candidates at merge\nrequest](#train-and-log-candidates-at-merge-request)\n\n- [Register the most successful\ncandidate](#register-the-most-successful-candidate)\n\n- [Dockerize and deploy an ML app with the registered\nmodel](#dockerize-and-deploy-an-ml-app-with-the-registered-model)\n\n\nIn this example, to decide whether to provide the user a loan, we make use\nof Random Forest Classifier, Decision Tree, and Logistic Regression. At the\nend of this showcase, we will have a web application that utilizes machine\nlearning to respond to the user.\n\n\nTo reproduce this example in your own GitLab environment, you can read the\nrest of this article or follow the video below. 
You can find the source code\nof this example in [these OBSS\nrepositories](https://gitlab.com/gitlab-partners-public/obss).\n\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/grNJAp1xAi0?si=Bf9CAP9lB1uWErOZ\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## Set up environment variables of MLflow\n\n\nOn the host where the code is executed, set the environment variables for\ntracking URI and token. This might be a remote host, CI pipeline, or your\nlocal environment. When they are set, you can call\n`mlflow.set_experiment(\"\u003Cexperiment_name>\")`. As a reference:\n\n\n```\n\nexport MLFLOW_TRACKING_URI=\"\u003Cyour gitlab endpoint>/api/v4/projects/\u003Cyour\nproject id>/ml/mlflow\"\n\nexport MLFLOW_TRACKING_TOKEN=\"\u003Cyour_access_token>\"\n\n```\n\n\n**Note:** If the training code contains the call to\n`mlflow.set_tracking_uri()`, remove it.\n\n\n## Train and log candidates at merge request\n\n\nIn your model train code, you can use MLflow methods to log metrics,\nartifacts, and parameters. You can also divide the train steps into pipeline\nstages if you are comfortable with that part. In this example, one Python\nfile will be used for both training and report generation.\n\n\n```\n\nmlflow.log_params(params)\n\nmlflow.log_metrics(metrics_data)\n\nmlflow.log_artifact(artifacts)\n\n```\n\n\nYou can then create the necessary pipeline to train the experiment. 
By\nadding the relevant rules, you can trigger this pipeline manually in merge\nrequests and observe the report generated as MR Note.\n\n\nWhen the pipeline is finished, you can see the details about the candidate\nin **Analyze > Model Experiments**.\n\n\n![details about the candidate in the finished\npipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676127/Blog/Content%20Images/Screenshot_1.png)\n\n\n## Register the most successful candidate\n\n\nAccording to the measurements you have made, we can register the most\nsuccessful candidate (may be the one with the highest accuracy value) with\nthe Run ID of the candidate.\n\n\nBut first, we need to create a model and its version in Registry. I created\nthese steps in separate stages and components (because I may need these\nsteps in other projects). You should be careful to use semantic versioning\nwhen versioning.\n\n\n### Register source model parameters and metrics\n\n\n```\n\nsource_candidate = client.get_run(source_candidate_id)\n\nparams = { k: v for k, v in source_candidate.data.params.items() }\n\nmetric = { k: v for k, v in source_candidate.data.metrics.items() }\n\n\nmodel_version = client.get_model_version(model_name, version)\n\nrun_id = model_version.run_id\n\nmodel_class = \"\"\n\nfor name, value in params.items():\n    client.log_param(run_id, name, value)\n    if name == \"Class\":\n        model_class = value\n\nfor name, value in metric.items():\n    client.log_metric(run_id, name, value)\n\n```\n\n\nAfter logging the parameters and metrics, you can [register the\nartifacts](https://gitlab.com/gitlab-partners-public/obss/mlops-loan-prediction/-/blob/main/register_candidate.py)\nas you did in the train step.\n\n\nYou may want to manually enter the inputs of the relevant steps as [a\nvariable in the\npipeline](https://gitlab.com/gitlab-partners-public/obss/components/-/blob/main/templates/register-candidate.yml).\n\n\n## CI/CD components\n\n\nI have used [CI/CD 
components](https://docs.gitlab.com/ee/ci/components/)\nbecause they provide a structured environment for managing machine learning\nworkflows. These components enable reusability by allowing teams to store\nand share standardized scripts, models, and datasets, ensuring that previous\nwork can be easily accessed, modified, and redeployed in future projects,\nthus accelerating development and reducing redundancy.\n\n\n> [Learn more about CI/CD components and the CI/CD\nCatalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/).\n\n\n## Dockerize and deploy an ML app with the registered model\n\n\nIn this project, while registering the model, I also register the pkl file\nas an artifact and then create the docker image with that artifact and send\nit to [GitLab Container\nRegistry](https://about.gitlab.com/blog/next-generation-gitlab-container-registry-goes-ga/).\n\n\nYou can now access your Docker image from the Container Registry and deploy\nit to your environment with the method you want.\n\n\n## Resources\n\n- [Model\nexperiments](https://docs.gitlab.com/ee/user/project/ml/experiment_tracking/)\n\n- [MLflow client\ncompatibility](https://docs.gitlab.com/ee/user/project/ml/experiment_tracking/mlflow_client.html)\n\n- [CI/CD components](https://docs.gitlab.com/ee/ci/components/)\n\n- [Building GitLab with GitLab: Why there is no MLOps without\nDevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)\n\n\n***Credits:**\n\nThis tutorial and the corresponding sample projects were created and\ngenerously shared with the community by [OBSS](https://obss.tech/en/). OBSS\nis an EMEA-based channel partner of GitLab. 
They have deep expertise across\nthe whole DevSecOps lifecycle and amongst many other things, they are more\nthan happy to support customers with migrating their MLOps workloads to\nGitLab.*\n",[9,737,109,284],{"slug":1039,"featured":91,"template":693},"build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow","content:en-us:blog:build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow.yml","Build An Ml App Pipeline With Gitlab Model Registry Using Mlflow","en-us/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow.yml","en-us/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow",{"_path":1045,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1046,"content":1052,"config":1058,"_id":1060,"_type":14,"title":1061,"_source":16,"_file":1062,"_stem":1063,"_extension":19},"/en-us/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions",{"title":1047,"description":1048,"ogTitle":1047,"ogDescription":1048,"noIndex":6,"ogImage":1049,"ogUrl":1050,"ogSiteName":706,"ogType":707,"canonicalUrls":1050,"schema":1051},"Explore the Dragon Realm: Building a C++ adventure game with AI","How to use GitLab Duo Code Suggestions to create a text-based adventure game, including magical locations to visit and items to procure, using C++.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663344/Blog/Hero%20Images/compassinfield.jpg","https://about.gitlab.com/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Explore the Dragon Realm: Build a C++ adventure game with a little help from AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2023-08-24\",\n      }",{"title":1053,"description":1048,"authors":1054,"heroImage":1049,"date":1055,"body":1056,"category":10,"tags":1057},"Explore the Dragon Realm: 
Build a C++ adventure game with a little help from AI",[711],"2023-08-24","Learning, for me, has never been about reading a textbook or sitting in on a\nlecture - it's been about experiencing and immersing myself in a hands-on\nchallenge. This is particulary true for new programming languages. With\n[GitLab Duo Code Suggestions](https://about.gitlab.com/gitlab-duo/),\nartificial intelligence (AI) becomes my interactive guide, providing an\nenvironment for trial, error, and growth. In this tutorial, we will build a\ntext-based adventure game in C++ by using Code Suggestions to learn the\nprogramming language along the way.\n\n\nYou can use this table of contents to navigate into each section. It is\nrecommended to read top-down for the best learning experience.\n\n\n- [Setup](#setup)\n  - [Installing VS Code](#installing-vs-code)\n  - [Installing Clang as a compiler](#installing-clang-as-a-compiler)\n  - [Setting up VS Code](#setting-up-vs-code)\n- [Getting started](#getting-started)\n  - [Compiling and running your program](#compiling-and-running-your-program)\n- [Setting the text adventure stage](#setting-the-adventure-stage)\n\n- [Defining the adventure: Variables](#defining-the-adventure-variables)\n\n- [Crafting the adventure: Making decisions with\nconditionals](#crafting-the-adventure-making-decisions-with-conditionals)\n\n- [Structuring the narrative:\nCharacters](#structuring-the-narrative-characters)\n\n- [Structuring the narrative: Items](#structuring-the-narrative-items)\n\n- [Applying what we've learned at the Grand\nLibrary](#applying-what-weve-learned-at-the-grand-library)\n\n- [See you next time in the Dragon\nRealm](#see-you-next-time-in-the-dragon-realm)\n\n- [Share your feedback](#share-your-feedback)\n\n\n> Download [GitLab Ultimate for free](https://about.gitlab.com/gitlab-duo/)\nfor a trial of GitLab Duo Code Suggestions.\n\n\n## Setup\n\nYou can follow this tutorial in your [preferred and 
supported\nIDE](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-in-other-ides-and-editors).\nReview the documentation to enable Code Suggestions for [GitLab.com\nSaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas)\nor [GitLab self-managed\ninstances](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\n\nThese installation instructions are for macOS Ventura on M1 Silicon. \n\n\n### Installing VS Code\n\n\n* Download and install [VS Code](https://code.visualstudio.com/download).\n\n* Alternatively, you can also install it as a Homebrew cask: `brew install\n--cask visual-studio-code`.\n\n\n### Installing Clang as a compiler\n\n\n* On macOS, you'll need to install some developer tools. Open your terminal\nand type:\n\n\n```\n\nxcode-select --install\n\n```\n\n\nThis will prompt you to install Xcode's command line tools, which include\nthe [Clang C++ compiler](https://clang.llvm.org/get_started.html).\n\n\nAfter the installation, you can check if `clang++` is installed by typing:\n\n\n```\n\nclang++ --version\n\n```\n\n\nYou should see an output that includes some information about the Clang\nversion you have installed. \n\n\n### Setting up VS Code\n\n\n* Launch VS Code.\n\n* Install and configure [the GitLab Workflow\nextension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow).\n\n* Optionally, in VS Code, install the [C/C++ Intellisense\nextension](https://marketplace.visualstudio.com/items?itemName=ms-vscode.cpptools),\nwhich helps with debugging C/C++. \n\n\n## Getting started\n\nNow, let's start building this magical adventure with C++. We'll start with\na \"Hello World\" example.\n\n\nCreate a new project `learn-ai-cpp-adventure`. In the project root, create\n`adventure.cpp`. The first part of every C++ program is the `main()`\nfunction. 
It's the entry point of the program.\n\n\nWhen you start writing `int main() {`, Code Suggestions will help\nautocomplete the function with some default parameters.\n\n\n![adventure.cpp with a hello world implementation suggested by Code\nSuggestions](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/0-helloworld.png){:\n.shadow}\n\n\n```cpp\n\nint main()\n\n{\n    cout \u003C\u003C \"Hello World\" \u003C\u003C endl;\n    return 0;\n}\n\n```\n\n\nWhile this is a good place to start, we need to add an include and update\nthe output statement:\n\n\n```cpp\n\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n\n// Main function, the starting point of the program\n\nint main()\n\n{\n    // Print \"Hello World!\" to the console\n    std::cout \u003C\u003C \"Hello World!\" \u003C\u003C std::endl;\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n\n```\n\n\nThe program prints \"Hello World!\" to the console when executed.\n\n\n* `#include \u003Ciostream>`: Because we are building a text-based adventure, we\nwill rely on input from the player using input and output operations (I/O)\nin C++. This include is a preprocessor directive that tells our program to\ninclude the `iostream` library, which provides facilities to use input and\noutput streams, such as `std::cout` for output.\n\n\n* You might find that Code Suggestions suggests `int main(int argc, char*\nargv[])` as the definition of our main function. The parameters `(int argc,\nchar* argv[])` are used to pass command-line arguments to the program. Code\nSuggestions added them as default parameters, but they are not needed if\nyou're not using command-line arguments. In that case, we can also define\nthe main function as `int main()`.\n\n\n* `std::cout \u003C\u003C \"Hello World!\" \u003C\u003C std::endl;`: outputs \"Hello World\" to the\nconsole. 
The stream operator `\u003C\u003C` is used to send the string to output.\n`std::endl` is an end-line character.\n\n\n* `return 0;`: we use `return 0;` to indicate the end of the `main()`\nfunction and return a value of 0. In C++, it is good practice to return 0 to\nindicate the program has completed successfully.\n\n\n### Compiling and running your program\n\nNow that we have some code, let's review how we'll compile and run this\nprogram. \n\n* Open your terminal or use the terminal in VSCode (View -> Terminal).\n\n* Navigate to your project directory.\n\n* Compile your program by typing:\n\n\n```bash\n\nclang++ adventure.cpp -o adventure\n\n```\n\n\nThis command tells the Clang++ compiler to compile adventure.cpp and create\nan executable named adventure. After this, run your program by typing:\n\n\n```\n\n./adventure\n\n```\n\n\nYou should see \"Hello World!\" printed in the terminal. \n\n\nBecause our tutorial uses a single source file `adventure.cpp`, we can use\nthe compiler directly to build our program. In the future, if the program\ngrows beyond a file, we'll set up additional configurations to handle\ncompilation. \n\n\n## Setting the text adventure stage\n\nBefore we get into more code, let's set the stage for our text adventure.\n\n\nFor this text adventure, players will explore the Dragon Realm. The Dragon\nRealm is full of mountains, lakes, and magic. Our player will enter the\nDragon Realm for the first time, explore different locations, meet new\ncharacters, collect magical items, and journal their adventure. At every\nlocation, they will be offered choices to decide the course of their\njourney.\n\n\nTo kick off our adventure into the Dragon Realm, let's update our\n`adventure.cpp main()` function to be more specific. 
As you update the\nwelcome message, you might find that Code Suggestions already knows we're\nbuilding a game.\n\n\n![adventure.cpp - Code Suggestions offers suggestion of welcoming users to\nthe Dragon Realm and knows its a\ngame](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/1-welcome-to-the-realm.png){:\n.shadow}\n\n\n```cpp\n\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n\n// Main function, the starting point of the program\n\nint main()\n\n{\n    // Print \"Hello World!\" to the console\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n\n```\n\n\n## Defining the adventure: Variables\n\nA variable stores data that can be used throughout the program scope in the\n`main()` function. A variable is defined by a type, which indicates the kind\nof data it can hold.\n\n\nLet's create a variable to hold our player's name and give it the type\n`string`. 
A `string` is designed to hold a sequence of characters so it's\nperfect for storing our player's name.\n\n\n```cpp\n\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n\n// Main function, the starting point of the program\n\nint main()\n\n{\n    // Print \"Hello World!\" to the console\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare a string variable to hold the player's name\n    std::string playerName;\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n\n```\n\n\nAs you do this, you may notice that Code Suggestions knows what's coming\nnext - prompting the user for their player's name.\n\n\n![adventure.cpp - Code Suggestions suggests welcoming the player with the\nplayerName\nvariable](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/2-player-name-variable.png){:\n.shadow}\n\n\nWe may be able to get more complete and specific Code Suggestions by\nproviding comments about what we'd like to do with the name - personally\nwelcome the player to the game. Start by adding our plan of action in\ncomments.\n\n\n```cpp\n    // Declare a string variable to hold the player's name\n    std::string playerName;\n\n    // Prompt the user to enter their player name\n\n    // Display a personalized welcome message to the player with their name\n```\n\n\nTo capture the player's name from input, we need to use the `std::cin`\nobject from the `iostream` library to fetch input from the player using the\nextraction operator `>>`. 
If you start typing `std::` to start prompting the\nuser, Code Suggestions will make some suggestions to help you gather user\ninput and save it to our `playerName` variable.\n\n\n![adventure.cpp - Code Suggestions prompts the user to input their player\nname](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/2.1-player-name-input.png){:\n.shadow}\n\n\nNext, to welcome our player personally to the game, we want to use\n`std::cout` and the `playerName` variable together:\n\n\n```cpp\n    // Declare a string variable to store the player name\n    std::string playerName;\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> playerName;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C playerName \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n```\n\n\n## Crafting the adventure: Making decisions with conditionals\n\nIt's time to introduce our player to the different locations in tbe Dragon\nRealm they can visit. To prompt our player with choices, we use\nconditionals. Conditionals allow programs to take different actions based on\ncriteria, such as user input.\n\n\nLet's offer the player a selection of locations to visit and capture their\nchoice as an `int` value that corresponds to the location they picked.\n\n\n```cpp\n\n// Display a personalized welcome message to the player with their name\n\nstd::cout \u003C\u003C \"Welcome \" \u003C\u003C playerName \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C\nstd::endl;\n\n\n// Declare an int variable to capture the user's choice\n\nint choice;\n\n```\n\n\nThen, we want to offer the player the different locations that are possible\nfor that choice. 
Let's start with a comment and prompt Code Suggestions with\n`std::cout` to fill out the details for us.\n\n\n![adventure.cpp - Code Suggestions suggests a multiline output for all the\nlocations listed in the code\nbelow](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3-setup-location-choice.png){:\n.shadow}\n\n\nAs you accept the suggestions, Code Suggestions will help build out the\noutput and ask the player for their input.\n\n\n![adventure.cpp - Code Suggestions suggests a multiline output for all the\nlocations listed in the code below and asks for player\ninput](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.1-capture-player-location-choice.png){:\n.shadow}\n\n\n```cpp\n    // Declare an int variable to capture the user's choice\n    int choice;\n\n    // Offer the player a choice of 3 locations: 1 for Moonlight Markets, 2 for Grand Library, and 3 for Shimmer Lake.\n    std::cout \u003C\u003C \"Where will \" \u003C\u003C playerName \u003C\u003C \" go?\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"3. Shimmer Lake\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"Please enter your choice: \";\n    std::cin >> choice;\n```\n\n\nOnce you start typing `std::cin >>` or accept the prompt for asking the\nplayer for their choice, Code Suggestions might offer a suggestion for\nbuilding out your conditional flow. 
AI is non-deterministic: One suggestion\ncan involve if/else statements while another solution uses a switch\nstatement.\n\n\nTo give Code Suggestions a nudge, we'll add a comment and start typing out\nan if statement: `if (choice ==)`.\n\n\n![adventure.cpp - Code Suggestions suggests using an if statement to manage\nchoice of\nlocations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.2-if-statement-locations.png){:\n.shadow}\n\n\nAnd if you keep accepting the subsequent suggestions, Code Suggestions will\nautocomplete the code using if/else statements.\n\n\n![adventure.cpp - Code Suggestions helps the user fill out the rest of the\nif/else statements for choosing a\nlocation](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.2.1-if-statement-locations-continued.png){:\n.shadow}\n\n\n```cpp\n    // Check the user's choice and display the corresponding messages\n    if (choice == 1) {\n        std::cout \u003C\u003C \"You chose Moonlight Markets\" \u003C\u003C std::endl;\n    }\n    else if (choice == 2) {\n        std::cout \u003C\u003C \"You chose Grand Library\" \u003C\u003C std::endl;\n    }\n    else if (choice == 3) {\n        std::cout \u003C\u003C \"You chose Shimmer Lake\" \u003C\u003C std::endl;\n    }\n    else {\n        std::cout \u003C\u003C \"Invalid choice\" \u003C\u003C std::endl;\n    }\n```\n\n\n`if/else` is a conditional statement that allows a program to execute code\nbased on whether a condition, in this case the player's choice, is true or\nfalse. 
If the condition evaluates to true, the code inside the braces is\nexecuted.\n\n\n* `if (condition)`: used to check if the condition is true.\n\n* `else if (another condition)`: if the previous condition isn't true, the\nprograms checks this condition.\n\n* `else`: if none of the previous conditions are true.\n\n\nAnother way of managing multiple choices like this example is using a\n`switch()` statement. A `switch` statement allows our program to jump to\ndifferent sections of code based on the value of an expression, which, in\nthis case, is the value of `choice`.\n\n\nWe are going to replace our `if/else` statements with a `switch` statement.\nYou can comment out or delete the `if/else` statements and prompt Code\nSuggestions starting with `switch(choice) {`.\n\n\n![adventure.cpp - Code Suggestions helps the user handle the switch\nstatement for the\nlocations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.3-conditional-switch-locations.png){:\n.shadow}\n\n\n![adventure.cpp - Code Suggestions helps the user handle the switch\nstatement for the\nlocations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.3.1-conditional-switch-locations-continued.png){:\n.shadow}\n\n\n```cpp\n    // Evaluate the player's decision\n    switch(choice) {\n        // If 'choice' is 1, this block is executed.\n        case 1:\n            std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n            break;\n        // If 'choice' is 2, this block is executed.\n        case 2:\n            std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n            break;\n        // If 'choice' is 3, this block is executed.\n        case 3:\n            std::cout \u003C\u003C \"You chose Shimmer Lake.\" \u003C\u003C std::endl;\n            break;\n        // If 'choice' is not 1, 2, or 3, this block is 
executed.\n        default:\n            std::cout \u003C\u003C \"You did not enter 1, 2, or 3.\" \u003C\u003C std::endl;\n    }\n```\n\n\nEach case represents a potential value that the variable or expression being\nswitched on (in this case, choice) could have. If a match is found, the code\nfor that case is executed. We use the `default` case to handle any input\nerrors in case the player enters a value that isn't accounted for.\n\n\nLet's build out what happens when our player visits the Shimmering Lake.\nI've added some comments after the player's arrival at Shimmering Lake to\nprompt Code Suggestions to help us build this out:\n\n\n```cpp\n    // If 'choice' is 3, this block is executed.\n    case 3:\n        std::cout \u003C\u003C \"You chose Shimmering Lake.\" \u003C\u003C std::endl;\n        // The player arrives at Shimmering Lake. It is one of the most beautiful lakes the player has ever seen.\n        // The player hears a mysterious melody from the water.\n        // They can either 1. Stay quiet and listen, or 2. Sing along with the melody.\n\n        break;\n```\n\n\nNow, if you start writing `std::cout` to begin offering the player this new\ndecision point, Code Suggestions will help fill out the output code.\n\n\n![adventure.cpp - Code Suggestions helps fill out the output code based on\nthe comments about the interaction at the\nLake](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4-case-3-output.png){:\n.shadow}\n\n\nYou might find that the code provided by Code Suggestions is very\ndeclarative. Once I've accepted the suggestion, I personalize the code as\nneeded. 
For example in this case, including the melody the player heard and\nusing the player's name instead of \"you\":\n\n\n![adventure.cpp - I added the playerName to the output and then prompted\nCode Suggestions to continue the narrative based on the comments\nabove](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.1-customizing-output.png){:\n.shadow}\n\n\nI also wanted Code Suggestions to offer suggestions in a specific format, so\nI added an end line:\n\n\n![adventure.cpp - I added an end line to prompt Code Suggestions to break\nthe choices into end line\noutputs](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.2-customizing-output-endline.png){:\n.shadow}\n\n\n![adventure.cpp - I added an endline to prompt Code Suggestions to break the\nchoices into end line\noutputs](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.3-sub-choices-output.png){:\n.shadow}\n\n\nNow, we'd like to offer our player a nested choice in this scenario. Before\nwe can define the new choices, we need a variable to store this nested\nchoice. Let's define a new variable `int nestedChoice` in our `main()`\nfunction, outside of the `switch()` statement we set up. 
You can put it\nafter our definition of the `choice` variable.\n\n\n```cpp\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n```\n\n\nNext, returning to the `if/else` statement we were working on in `case 3`,\nwe want to prompt the player for their decision and save it in\n`nestedChoice`.\n\n\n![adventure.cpp - I added an end line to prompt Code Suggestions to break\nthe choices into end line\noutputs](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.4-capture-nested-choice.png){:\n.shadow}\n\n\nAs you can see, Code Suggestions wants to go ahead and handle the user's\nchoice using another `switch` statement. I would prefer to use an `if/else`\nstatement to handle this decision point.\n\n\nFirst, let's add some comments to give context:\n\n\n```cpp\n    // Capture the user's nested choice\n    std::cin >> nestedChoice;\n\n    // If the player chooses 1 and remains silent, they hear whispers of the merfolk below, but nothing happens.\n    // If the player chooses 2 and sings along, a merfolk surfaces and gifts them a special blue gem as a token of appreciation for their voice.\n\n    // Evaluate the user's nestedChoice\n```\n\n\nThen, start typing `if (nestedChoice == 1)` and Code Suggestions will start\nto offer suggestions:\n\n\n![adventure.cpp - Code Suggestions starts to build out an if statement to\nhandle the\nnestedChoice](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.5-nested-choice-if.png){:\n.shadow}\n\n\nIf you tab to accept them, Code Suggestions will continue to fill out the\nrest of the nested `if/else` statements.\n\n\nSometimes, while you're customizing the suggestions that Code Suggestions\ngives, you may even discover that it would like to make creative\nsuggestions, 
too!\n\n\n![adventure.cpp - Code Suggestions makes a creative suggestion to end the\ninteraction with the merfolk by saying \"You are now free to go\" after you\nreceive the\ngem.](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.5.2-nested-cs-creative-suggestion.png){:\n.shadow}\n\n\nHere's the code for `case 3` for the player's interaction at Shimmering Lake\nwith the nested decision. I've updated some of the narrative dialogue\nplayer's name.\n\n```\n    // Handle the Shimmering Lake scenario.\n    case 3:\n        std::cout \u003C\u003C playerName \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that\" \u003C\u003C playerName \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Sing along with the melody\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n\n        // Capture the user's nested choice\n        std::cin >> nestedChoice;\n\n        // If the player chooses to remain silent\n        if (nestedChoice == 1)\n        {\n            std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C playerName \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n        }\n        // If the player chooses to sing along with the melody\n        else if (nestedChoice == 2)\n        {\n            std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C playerName\n                    \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                    \u003C\u003C std::endl;\n        }\n        break;\n```\n\n\nOur player isn't limited to just exploring Shimmering Lake. 
There's a whole\nrealm to explore and they might want to go back and explore other locations.\n\n\nTo facilitate this, we can use a `while` loop. A loop is a type of\nconditional that allows a specific section of code to be executed multiple\ntimes based on a condition. For the `condition` that allows our `while` loop\nto run multiple times, let's use a `boolean` to initialize the loop\ncondition.\n\n\n```cpp\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n        // wrap the code for switch(choice)\n    }\n```\n\n\nWe also need to move our location prompt inside the `while` loop so that the\nplayer can visit more than one location at the time.\n\n\n![adventure.cpp - CS helps us write a go next prompt for the\nlocations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.6-while-loop-go-next.png){:\n.shadow}\n\n\n```cpp\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout \u003C\u003C \"Where will \" \u003C\u003C playerName \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. 
Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n```\n\n\nOur `while` loop will keep running as long as `exploring` is `true`, so we\nneed a way for the player to have the option to exit the game. Let's add a\ncase 4 that allows the player to exit by setting `exploring = false`. This\nwill exit the loop and take the player back to the original choices.\n\n\n```cpp\n    // Option to exit the game\n    case 4:\n        exploring = false;\n        break;\n```\n\n\n**Async exercise**: Give the player the option to exit the game instead of\nexploring a new decision.\n\n\nWe also need to update the error handling for invalid inputs in the `switch`\nstatement. You can decide whether to end the program or use the `continue`\nstatement to start a new loop iteration.\n\n\n```cpp\n        default:\n            std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n            continue; // Errors continue with the next loop iteration\n```\n\n\nUsing I/O and conditionals is at the core of text-based adventure games and\nhelps make these games interactive. 
We can combine user input, display\noutput, and implement our narrative into decision-making logic to create an\nengaging experience.\n\n\nHere's what our `adventure.cpp` looks like now with some comments:\n\n\n```cpp\n\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n\n// Main function, the starting point of the program\n\nint main()\n\n{\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare a string variable to store the player name\n    std::string playerName;\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> playerName;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C playerName \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout \u003C\u003C \"Where will \" \u003C\u003C playerName \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. 
Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n            //  Handle the Moonlight Markets scenario\n            case 1:\n                std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Grand Library scenario.\n            case 2:\n                std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Shimmering Lake scenario.\n            case 3:\n                std::cout \u003C\u003C playerName \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that\" \u003C\u003C playerName \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"2. 
Sing along with the melody\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"Please enter your choice: \";\n\n                // Capture the user's nested choice\n                std::cin >> nestedChoice;\n\n                // If the player chooses to remain silent\n                if (nestedChoice == 1)\n                {\n                    std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C playerName \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n                }\n                // If the player chooses to sing along with the melody\n                else if (nestedChoice == 2)\n                {\n                    std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C playerName\n                            \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                            \u003C\u003C std::endl;\n                }\n                break;\n            // Option to exit the game\n            case 4:\n                exploring = false;\n                break;\n            // If 'choice' is not 1, 2, or 3, this block is executed.\n            default:\n                std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n                continue; // Errors continue with the next loop iteration\n        }\n    }\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n\n```\n\n\nHere's what the build output looks like if we run `adventure.cpp` and the\nplayer heads to the Shimmering Lake.\n\n\n![adventure.cpp build output - the player is called sugaroverflow and heads\nto the Shimmering Lake and receives a\ngem](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.6.1-full-case-3-output.png){:\n.shadow}\n\n\n## Structuring the narrative: Characters\n\nOur player can now explore the world. 
Soon, our player will also be able to\nmeet people and collect objects. Before we can do that, let's organize the\nthings our player can do with creating some structure for the player\ncharacter.\n\n\nIn C++, a `struct` is used to group different data types. It's helpful in\ncreating a group of items that belong together, such as our player's\nattributes and inventory, into a single unit. `struct` objects are defined\nglobally, which means at top the file, before the `main() function.\n\n\nIf you start typing `struct Player {`, Code Suggestions will help you out\nwith a sample definition of a player struct.\n\n\n![adventure.cpp - Code Suggestions helps with setting up the struct\ndefinition for the\nplayer](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/4-player-struct-definition.png){:\n.shadow}\n\n\nAfter accepting this suggestion, you might find that Code Suggestions is\neager to define some functions to make this game more fun, such as hunting\nfor treasure.\n\n\n![adventure.cpp - Code Suggestions provides a suggestion for creating\nfunctions to hunt for\ntreasure.](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/4.1-player-struct-treasure-suggestion.png){:\n.shadow}\n\n\n```cpp\n\n// Define a structure for a Player in the game.\n\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. 
Could be used for leveling up or other game mechanics.\n};\n\n```\n\n\nGiving the player experience points was not in my original plan for this\ntext adventure game, but Code Suggestions makes an interesting suggestion.\nWe could use `xp` for leveling up or for other game mechanics as our project\ngrows.\n\n\n`struct Player` provides a blueprint for creating a player and details the\nattributes that make up a player. To use our player in our code, we must\ninstantiate, or create, an object of the `Player` struct within our `main()`\nfunction. Objects in C++ are instances of structures that contain\nattributes. In our example, we're working with the `Player` struct, which\nhas attributes like name, health, and xp.\n\n\nAs you're creating a `Player` object, you might find that Code Suggestions\nwants to name the player \"John.\"\n\n\n![adventure.cpp - code suggestions suggests naming the new Player object\nJohn.](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/4.2-player-struct-instance-john.png){:\n.shadow}\n\n\n```cpp\n\nint main() {\n    // Create an instance of the Player struct\n    Player player;\n    player.health = 100; // Assign a default value for HP\n```\n\n\nInstead of naming our player \"John\" for everyone, we'll use the `Player`\nobject to set the attribute for name. When we want to interact with or\nmanipulate an attribute of an object, we use the dot operator `.`. The dot\noperator allows us to access specific members of the object. 
We can set the\nplayer's name using the dot operator with `player.name`.\n\n\nNote that we need to replace other mentions of `playerName` the variable\nwith `player.name`, which allows us to access the player object's name\ndirectly.\n\n\n* Search for all occurrences of the `playerName` variable, and replace it\nwith `player.name`.\n\n* Comment/Remove the unused `std::string playerName` variable after that.\n\n\nWhat your `adventure.cpp` will look like now:\n\n\n```cpp\n\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n\n// Define a structure for a Player in the game.\n\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. Could be used for leveling up or other game mechanics.\n};\n\n\n// Main function, the starting point of the program\n\nint main()\n\n{\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Create an instance of the Player struct\n    Player player;\n    player.health = 100; // Assign a default value for HP\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> player.name;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C player.name \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout 
\u003C\u003C \"Where will \" \u003C\u003C player.name \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n            //  Handle the Moonlight Markets scenario\n            case 1:\n                std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Grand Library scenario.\n            case 2:\n                std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Shimmering Lake scenario.\n            case 3:\n                std::cout \u003C\u003C player.name \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that\" \u003C\u003C player.name \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"2. 
Sing along with the melody\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"Please enter your choice: \";\n\n                // Capture the user's nested choice\n                std::cin >> nestedChoice;\n\n                // If the player chooses to remain silent\n                if (nestedChoice == 1)\n                {\n                    std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C player.name \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n                }\n                // If the player chooses to sing along with the melody\n                else if (nestedChoice == 2)\n                {\n                    std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C player.name\n                            \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                            \u003C\u003C std::endl;\n                }\n                break;\n            // Option to exit the game\n            case 4:\n                exploring = false;\n                break;\n            // If 'choice' is not 1, 2, or 3, this block is executed.\n            default:\n                std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n                continue; // Errors continue with the next loop iteration\n        }\n    }\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n\n```\n\n\n## Structuring the narrative: Items\n\nAn essential part of adventure games is a player's inventory - the\ncollection of items they acquire and use during their journey. For example,\nat Shimmering Lake, the player acquired a blue gem.\n\n\nLet's update our Player `struct` to include an inventory using an array. In\nC++, an `array` is a collection of elements of the same type that can be\nidentified by an index. When creating an array, you need to specify its type\nand size. 
Start by adding `std::string inventory` to the Player `struct`:\n\n\n![adventure.cpp - Code Suggestions shows us how to add an array of strings\nto the player struct to use as the players\ninventory](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5-add-inventory-player-struct.png){:\n.shadow}\n\n\nYou might find that Code Suggestions wants our player to be able to carry\nsome gold, but we don't need that for now. Let's also add `int\ninventoryCount;` to keep track of the number of items in our player's\ninventory.\n\n\n![adventure.cpp - Code Suggestions shows us how to add an integer for\ninventoryCount to the player\nstruct](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.1-add-inventory-count-player-struct.png){:\n.shadow}\n\n\n```cpp\n\n// Define a structure for a Player in the game.\n\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. Could be used for leveling up or other game mechanics.\n    std::string inventory[10];  // An array of strings for the player's inventory.\n    int inventoryCount = 0;  // The number of items in the player's inventory.\n};\n\n```\n\nIn our Player `struct`, we have defined an array for our inventory that can\nhold the names of 10 items (type:string, size: 10). As the player progresses\nthrough our story, we can assign new items to the inventory array based on\nthe player's actions using the array index.\n\n\nSometimes Code Suggestions gets ahead of me and tries to add more complexity\nto the game by suggesting that we need to create a `struct` for some\nMonsters. 
Maybe later, Code Suggestions!\n\n\n![adventure.cpp - Code Suggestions wants to add a struct for Monsters we can\nbattle](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.2-suggestion-gets-distracted-by-monsters.png\n\n){: .shadow}\n\n\nBack at the Shimmering Lake, the player received a special blue gem from the\nmerfolk. Let's update the code in `case 2` for the Shimmering Lake to add\nthe gem to our player's inventory.\n\n\nYou can start by accessing the player's inventory with `player.inventory`\nand Code Suggestions will help add the gem.\n\n\n![adventure.cpp - Code Suggestions shows us how to add a gem to the player's\ninventory using a post-increment operation and the inventory array from the\nstruct\nobject](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.3-add-gem-to-inventory.png){:\n.shadow}\n\n\n```cpp\n    // If the player chooses to sing along with the melody\n    else if (nestedChoice == 2)\n    {\n        std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C player.name\n                \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                \u003C\u003C std::endl;\n        player.inventory[player.inventoryCount] = \"Blue Gem\";\n        player.inventoryCount++;\n    }\n```\n\n\n* `player.inventory`: accesses the inventory attribute of the player object\n\n* `player.inventoryCount`: accesses the integer that keeps track of how many\nitems are currently in the player's inventory. This also represents the next\navailable index in our inventory array where an item can be stored.\n\n* `player.inventoryCount++`: increments the value of inventoryCount by 1.\nThis is a post-increment operation. 
We are adding “Blue Gem” to the next\navailable slot in the inventory array and incrementing the count for the\nnewly added item.\n\n\nOnce we've added something to our player's inventory, we may also want to be\nable to look at everything in the inventory. We can use a `for` loop to\niterate over the inventory array and display each item.\n\n\nIn C++, a `for` loop allows code to be repeatedly executed a specific number\nof times. It's different from the `while` loop we used earlier because the\n`while` executes its body based on a condition, whereas a `for` loop\niterates over a sequence or range, usually with a known number of times.\n\n\nAfter adding the gem to the player's inventory, let's display all the items\nit has. Try starting a for loop with `for ( ` to display the player's\ninventory and Code Suggestions will help you with the syntax.\n\n\n![adventure.cpp - Code Suggestions demonstrates how to write a for loop to\nloop through the players\ninventory](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.4-loop-over-players-inventory.png){:\n.shadow}\n\n\n```cpp\n\nstd::cout \u003C\u003C player.name \u003C\u003C \"'s Inventory:\" \u003C\u003C std::endl;\n\n// Loop through the player's inventory up to the count of items they have\n\nfor (int i = 0; i \u003C player.inventoryCount; i++)\n\n{\n    // Output the item in the inventory slot\n    std::cout \u003C\u003C \"- \" \u003C\u003C player.inventory[i] \u003C\u003C std::endl;\n}\n\n```\n\n\nA `for` loop consists of 3 main parts:\n\n\n* `int i = 0`: is the initialization where you set up your loop variable.\nHere, we start counting from 0.\n\n* `i \u003C player.inventoryCount`: is the condition we're looping on, our loop\nchecks if `i`, the current loop variable, is less than the number of items\nin our inventory. It will keep going as long as this is true.\n\n* `i++`: is the iteration. 
This updates the loop variable each time the loop\nruns.\n\n\nTo make sure that our loop doesn't encounter an error, let's add some error\nhandling to make sure the inventory is not empty when we try to output it.\n\n\n```\n\nstd::cout \u003C\u003C player.name \u003C\u003C \"'s Inventory:\" \u003C\u003C std::endl;\n\n// Loop through the player's inventory up to the count of items they have\n\nfor (int i = 0; i \u003C player.inventoryCount; i++)\n\n{\n    // Check if the inventory slot is not empty.\n    if (!player.inventory[i].empty())\n    {\n        // Output the item in the inventory slot\n        std::cout \u003C\u003C \"- \" \u003C\u003C player.inventory[i] \u003C\u003C std::endl;\n    }\n}\n\n```\n\n\nWith our progress so far, we've successfully established a persistent\n`while` loop for our adventure, handled decisions, crafted a `struct` for\nour player, and implemented a simple inventory system. Now, let's dive into\nthe next scenario, the Grand Library, applying the foundations we've\nlearned.\n\n\n**Async exercise**: Add more inventory items found in different locations.\n\n\nHere's what we have for `adventure.cpp` so far:\n\n\n```cpp\n\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n\n// Define a structure for a Player in the game.\n\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. 
Could be used for leveling up or other game mechanics.\n    std::string inventory[10];  // An array of strings for the player's inventory.\n    int inventoryCount = 0;\n};\n\n\n// Main function, the starting point of the program\n\nint main()\n\n{\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Create an instance of the Player struct\n    Player player;\n    player.health = 100; // Assign a default value for HP\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> player.name;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C player.name \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout \u003C\u003C \"--------------------------------------------------------\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Where will \" \u003C\u003C player.name \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. 
Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n            //  Handle the Moonlight Markets scenario\n            case 1:\n                std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Grand Library scenario.\n            case 2:\n                std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Shimmering Lake scenario.\n            case 3:\n                std::cout \u003C\u003C player.name \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that\" \u003C\u003C player.name \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"2. 
Sing along with the melody\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"Please enter your choice: \";\n\n                // Capture the user's nested choice\n                std::cin >> nestedChoice;\n\n                // If the player chooses to remain silent\n                if (nestedChoice == 1)\n                {\n                    std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C player.name \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n                }\n                // If the player chooses to sing along with the melody\n                else if (nestedChoice == 2)\n                {\n                    std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C player.name\n                            \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                            \u003C\u003C std::endl;\n                    player.inventory[player.inventoryCount] = \"Blue Gem\";\n                    player.inventoryCount++;\n\n                    std::cout \u003C\u003C player.name \u003C\u003C \"'s Inventory:\" \u003C\u003C std::endl;\n                    // Loop through the player's inventory up to the count of items they have\n                    for (int i = 0; i \u003C player.inventoryCount; i++)\n                    {\n                        // Check if the inventory slot is not empty.\n                        if (!player.inventory[i].empty())\n                        {\n                            // Output the item in the inventory slot\n                            std::cout \u003C\u003C \"- \" \u003C\u003C player.inventory[i] \u003C\u003C std::endl;\n                        }\n                    }\n\n                }\n                break;\n            // Option to exit the game\n            case 4:\n                exploring = false;\n                break;\n            // If 'choice' is not 1, 2, 
or 3, this block is executed.\n            default:\n                std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n                continue; // Errors continue with the next loop iteration\n        }\n    }\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n\n```\n\n\n![adventure.cpp - A full output of the game at the current state - our\nplayer sugaroverflow visits the Lake, receives the gem, adds it to their\ninventory, and we display the inventory before returning to the\nloop](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.5-full-output-shimmering-lake.png){:\n.shadow}\n",[496,9,759,716,737],{"slug":1059,"featured":6,"template":693},"building-a-text-adventure-using-cplusplus-and-code-suggestions","content:en-us:blog:building-a-text-adventure-using-cplusplus-and-code-suggestions.yml","Building A Text Adventure Using Cplusplus And Code Suggestions","en-us/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions.yml","en-us/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions",{"_path":1065,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1066,"content":1072,"config":1086,"_id":1088,"_type":14,"title":1089,"_source":16,"_file":1090,"_stem":1091,"_extension":19},"/en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"title":1067,"description":1068,"ogTitle":1067,"ogDescription":1068,"noIndex":6,"ogImage":1069,"ogUrl":1070,"ogSiteName":706,"ogType":707,"canonicalUrls":1070,"schema":1071},"Building GitLab with GitLab: A multi-region service to deliver AI features","Discover how we built our first multi-region deployment for teams at GitLab using the platform's many features, helping create a frictionless developer experience for GitLab Duo 
users.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098664/Blog/Hero%20Images/Blog/Hero%20Images/building-gitlab-with-gitlab-no-type_building-gitlab-with-gitlab-no-type.png_1750098663794.png","https://about.gitlab.com/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: A multi-region service to deliver AI features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chance Feick\"},{\"@type\":\"Person\",\"name\":\"Sam Wiskow\"}],\n        \"datePublished\": \"2024-09-12\",\n      }",{"title":1067,"description":1068,"authors":1073,"heroImage":1069,"date":1076,"body":1077,"category":1078,"tags":1079},[1074,1075],"Chance Feick","Sam Wiskow","2024-09-12","For GitLab Duo, real-time AI-powered capabilities like [Code\nSuggestions](https://about.gitlab.com/solutions/code-suggestions/) need\nlow-latency response times for a frictionless developer experience. Users\ndon’t want to interrupt their flow and wait for a code suggestion to show\nup. To ensure GitLab Duo can provide the right suggestion at the right time\nand meet high performance standards for critical AI infrastructure, GitLab\nrecently launched our first multi-region service to deliver AI features.\n\n\nIn this article, we will cover the benefits of multi-region services, how we\nbuilt an internal platform codenamed ‘Runway’ for provisioning and deploying\nmulti-region services using GitLab features, and the lessons learned\nmigrating to multi-region in production.\n\n\n## Background on the project\n\n\nRunway is GitLab’s internal platform as a service (PaaS) for provisioning,\ndeploying, and operating containerized services. 
Runway's purpose is to\nenable GitLab service owners to self-serve infrastructure needs with\nproduction readiness out of the box, so application developers can focus on\nproviding value to customers. As part of [our corporate value of\ndogfooding](https://handbook.gitlab.com/handbook/values/#results), the first\niteration was built in 2023 by the Infrastructure department on top of core\nGitLab capabilities, such as continuous integration/continuous delivery\n([CI/CD](https://about.gitlab.com/topics/ci-cd/)), environments, and\ndeployments.\n\n\nBy establishing automated GitOps best practices, Runway services use\ninfrastructure as code (IaC), merge requests (MRs), and CI/CD by default.\n\n\nGitLab Duo is primarily powered by [AI\nGateway](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist),\na satellite service written in Python outside of GitLab’s modular monolith\nwritten in Ruby. In cloud computing, a region is a geographical location of\ndata centers operated by cloud providers.\n\n\n## Defining a multi-region strategy\n\n\nDeploying in a single region is a good starting point for most services, but\ncan come with downsides when you are trying to reach a global audience.\nUsers who are geographically far from where your service is deployed may\nexperience different levels of service and responsiveness than those who are\ncloser. This can lead to a poor user experience, even if your service is\nwell built in all other respects.\n\n\nFor AI Gateway, it was important to meet global customers wherever they are\nlocated, whether on GitLab.com or self-managed instances using Cloud\nConnector. When a developer is deciding to accept or reject a code\nsuggestion, milliseconds matter and can define the user experience.\n\n\n### Goals\n\n\nMulti-region deployments require more infrastructure complexity, but for use\ncases where latency is a core component of the user experience, the benefits\noften outweigh the downsides. 
First, multi-region deployments offer\nincreased responsiveness to the user. By serving requests from locations\nclosest to end users, latency can be significantly reduced. Second,\nmulti-region deployments provide greater availability. With fault tolerance,\nservices can fail over during a regional outage. There is a much lower\nchance of a service failing completely, meaning users should not be\ninterrupted even in partial failures.\n\n\nBased on our goals for performance and availability, we used this\nopportunity to create a scalable multi-region strategy in Runway, which is\nbuilt leveraging GitLab features.\n\n\n### Architecture\n\n\nIn SaaS platforms, GitLab.com’s infrastructure is hosted on Google Cloud\nPlatform (GCP). As a result, Runway’s first supported platform runtime is\nCloud Run. The initial workloads deployed on Runway are stateless satellite\nservices (e.g., AI Gateway), so Cloud Run services are a good fit that\nprovide a clear migration path to more complex and flexible platform\nruntimes, e.g. Kubernetes.\n\n\nBuilding Runway on top of GCP Cloud Run using GitLab has allowed us to\niterate and tease out the right level of abstractions for service owners as\npart of a platform play in the Infrastructure department.\n\n\nTo serve traffic from multiple regions in Cloud Run, the multi-region\ndeployment strategy must support global load balancing, and the provisioning\nand configuration of regional resources. Here’s a simplified diagram of the\nproposed architecture in GCP:\n\n\n![simplified diagram of the proposed architecture in\nGCP](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098671/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098671612.png)\n\n\nBy replicating Cloud Run services across multiple regions and configuring\nthe existing global load balancing with serverless network endpoint group\n(NEG) backends, we’re able to serve traffic from multiple regions. 
For the\nremainder of the article, we’ll focus less on specifics of Cloud Run and\nmore on how we’re building with GitLab.\n\n\n## Building a multi-region platform with GitLab\n\n\nNow that you have context about Runway, let's walk through how to build a\nmulti-region platform using GitLab features.\n\n\n### Provision\n\n\nWhen building an internal platform, the first challenge is provisioning\ninfrastructure for a service. In Runway, Provisioner is the component that\nis responsible for maintaining a service inventory and managing IaC for GCP\nresources using Terraform.\n\n\nTo provision a service, an application developer will open an MR to add a\nservice project to the inventory using git, and Provisioner will create\nrequired resources, such as service accounts and identity and access\nmanagement policies. When building this functionality with GitLab, Runway\nleverages [OpenID Connect (OIDC) with GCP Workload Identity\nFederation](https://docs.gitlab.com/ee/ci/cloud\\_services/google\\_cloud/)\nfor managing IaC.\n\n\nAdditionally, Provisioner will create a deployment project for each service\nproject. The purpose of creating separate projects for deployments is to\nensure the [principle of least\nprivilege](https://about.gitlab.com/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab/)\nby authenticating as a GCP service account with restricted permissions.\nRunway leverages the [Projects\nAPI](https://docs.gitlab.com/ee/api/projects.html) for creating projects\nwith [Terraform\nprovider](https://registry.terraform.io/providers/gitlabhq/gitlab/latest/docs).\n\n\nFinally, Provisioner defines variables in the deployment project for the\nservice account, so that deployment CI jobs can authenticate to GCP. 
Runway\nleverages [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) and\n[Job Token\nallowlist](https://docs.gitlab.com/ee/ci/jobs/ci\\_job\\_token.html\\#add-a-group-or-project-to-the-job-token-allowlist)\nto handle authentication and authorization.\n\n\nHere’s a simplified example of provisioning a multi-region service in the\nservice inventory:\n\n\n```\n\n{\n  \"inventory\": [\n    {\n      \"name\": \"example-service\",\n      \"project_id\": 46267196,\n      \"regions\": [\n        \"europe-west1\",\n        \"us-east1\",\n        \"us-west1\"\n      ]\n    }\n  ]\n}\n\n```\n\n\nOnce provisioned, a deployment project and necessary infrastructure will be\ncreated for a service.\n\n\n### Configure\n\n\nAfter a service is provisioned, the next challenge is the configuration for\na service. In Runway,\n[Reconciler](https://gitlab.com/gitlab-com/gl-infra/platform/runway/runwayctl)\nis a component that is responsible for configuring and deploying services by\naligning the actual state with the desired state using Golang and Terraform.\n\n\nHere’s a simplified example of an application developer configuring GitLab\nCI/CD in their service project:\n\n\n```\n\n# .gitlab-ci.yml\n\nstages:\n  - validate\n  - runway_staging\n  - runway_production\n\ninclude:\n  - project: 'gitlab-com/gl-infra/platform/runway/runwayctl'\n    file: 'ci-tasks/service-project/runway.yml'\n    inputs:\n      runway_service_id: example-service\n      image: \"$CI_REGISTRY_IMAGE/${CI_PROJECT_NAME}:${CI_COMMIT_SHORT_SHA}\"\n      runway_version: v3.22.0\n\n# omitted for brevity\n\n```\n\n\nRunway provides sane default values for configuration that are based on our\nexperience in delivering stable and reliable features to customers.\nAdditionally, service owners can configure infrastructure using a service\nmanifest file hosted in a service project. The service manifest uses JSON\nSchema for validation. 
When building this functionality with GitLab, Runway\nleverages [Pages](https://docs.gitlab.com/ee/user/project/pages/) for schema\ndocumentation.\n\n\nTo deliver this part of the platform, Runway leverages [CI/CD\ntemplates](https://docs.gitlab.com/ee/development/cicd/templates.html),\n[Releases](https://docs.gitlab.com/ee/user/project/releases/), and\n[Container\nRegistry](https://docs.gitlab.com/ee/user/packages/container\\_registry/) for\nintegrating with service projects.\n\n\nHere’s a simplified example of a service manifest:\n\n\n```\n\n# .runway/runway-production.yml\n\napiVersion: runway/v1\n\nkind: RunwayService\n\nspec:\n container_port: 8181\n regions:\n   - us-east1\n   - us-west1\n   - europe-west1\n\n# omitted for brevity\n\n```\n\n\nFor multi-region services, Runway injects an environment variable into the\ncontainer instance runtime, e.g. RUNWAY\\_REGION, so application developers\nhave the context to make any downstream dependencies regionally-aware, e.g.\nVertex AI API.\n\n\nOnce configured, a service project will be integrated with a deployment\nproject.\n\n\n### Deploy\n\n\nAfter a service project is configured, the next challenge is deploying a\nservice. In Runway, Reconciler handles this by triggering a deployment job\nin the deployment project when an MR is merged to the main branch. When\nbuilding this functionality with GitLab, Runway leverages [Trigger\nPipelines](https://docs.gitlab.com/ee/ci/triggers/) and [Multi-Project\nPipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream\\_pipelines.html\\#multi-project-pipelines)\nto trigger jobs from service project to deployment project.\n\n\n![trigger jobs from service project to deployment\nproject](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098671612.png)\n\n\nOnce a pipeline is running in a deployment project, it will be deployed to\nan environment. 
By default, Runway will provision staging and production\nenvironments for all services. At this point, Reconciler will apply any\nTerraform resource changes for infrastructure. When building this\nfunctionality with GitLab, Runway leverages\n[Environments/Deployments](https://docs.gitlab.com/ee/ci/environments/) and\n[GitLab-managed Terraform\nstate](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform\\_state.html)\nfor each service.\n\n\n![Reconciler applies any Terraform resource changes for\ninfrastructure](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098671614.png)\n\n\nRunway provides default application metrics for services. Additionally,\ncustom metrics can be used by enabling a sidecar container with\nOpenTelemetry Collector configured to scrape Prometheus and remote write to\nMimir. By providing observability out of the box, Runway is able to bake\nmonitoring into CI/CD pipelines.\n\n\nExample scenarios include gradual rollouts for blue/green deployments,\npreventing promotions to production when staging is broken, or automatically\nrolling back to previous revision when elevated error rates occur in\nproduction.\n\n\n![Runway bakes monitoring into CI/CD\npipelines](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098671615.png)\n\n\nOnce deployed, environments will serve the latest revision of a service. At\nthis point, you should have a good understanding of some of the challenges\nthat will be encountered, and how to solve them with GitLab features.\n\n\n## Migrating to multi-region in production\n\n\nAfter extending Runway components to support multi-region in Cloud Run, the\nfinal challenge was migrating from AI Gateway’s single-region deployment in\nproduction with zero downtime. 
Today, teams using Runway to deploy their\nservices can self-serve on regions making a multi-region deployment just as\nsimple as a single-region deployment. \n\n\nWe were able to iterate on building multi-region functionality without\nimpacting existing infrastructure by using semantic versioning for Runway.\nNext, we’ll share some learnings from the migration that may inform how to\noperate services for an internal multi-region platform.\n\n\n### Dry run deployments\n\n\nIn Runway, Reconciler will apply Terraform changes in CI/CD. The trade-off\nis that plans cannot be verified in advance, which could risk inadvertently\ndestroying or misconfiguring production infrastructure. To solve this\nproblem, Runway will perform a “dry run” deployment for MRs.\n\n\n![\"Dry run\"\ndeployment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098671616.png)\n\n\nFor migrating AI Gateway, dry run deployments increased confidence and\nhelped mitigate risk of downtime during rollout. When building an internal\nplatform with GitLab, we recommend supporting dry run deployments from the\nstart.\n\n\n### Regional observability\n\n\nIn Runway, existing observability was aggregated by assuming a single-region\ndeployment. To solve this problem, Runway observability was retrofitted to\ninclude a new region label for Prometheus metrics.\n\n\nOnce metrics were retrofitted, we were able to introduce service level\nindicators (SLIs) for both regional Cloud Run services and global load\nbalancing. 
Here’s an example dashboard screenshot for a general Runway\nservice:\n\n\n![dashboard screenshot for a general Runway\nservice](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098671617.png)\n\n\n***Note:** Data is not actual production data and is only for illustration\npurposes.*\n\n\nAdditionally, we were able to update our service level objectives (SLOs) to\nsupport regions. As a result, service owners could be alerted when a\nspecific region experiences an elevated error rate, or increase in response\ntimes.\n\n\n![screenshot of\nalerts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098671617.png)\n\n\n***Note:** Data is not actual production data and is only for illustration\npurposes.*\n\n\nFor migrating AI Gateway, regional observability increased confidence and\nhelped provide more visibility into new infrastructure. When building an\ninternal platform with GitLab, we recommend supporting regional\nobservability from the start.\n\n\n### Self-service regions\n\n\nThe Infrastructure department successfully performed the initial migration\nof multi-region support for AI Gateway in production with zero downtime.\nGiven the risk associated with rolling out a large infrastructure migration,\nit was important to ensure the service continued working as expected.\n\n\nShortly afterwards, service owners began self-serving additional regions to\nmeet the growth of customers. At the time of writing, [GitLab\nDuo](https://about.gitlab.com/gitlab-duo/) is available in six regions\naround the globe and counting. Service owners are able to configure the\ndesired regions, and Runway will provide guardrails along the way in a\nscalable solution.\n\n\nAdditionally, three other internal services have already started using\nmulti-region functionality on Runway. 
Application developers have entirely\nself-served functionality, which validates that we’ve provided a good\nplatform experience for service owners. For a platform play, a scalable\nsolution like Runway is considered a good outcome since the Infrastructure\ndepartment is no longer a blocker.\n\n\n## What’s next for Runway\n\n\nBased on how quickly we could iterate to provide results for customers, the\nSaaS Platforms department has continued to invest in Runway. We’ve grown the\nRunway team with additional contributors, started evolving the platform\nruntime (e.g. Google Kubernetes Engine), and continue dogfooding with\ntighter integration in the product.\n\n\nIf you’re interested in learning more, feel free to check out\n[https://gitlab.com/gitlab-com/gl-infra/platform/runway](https://gitlab.com/gitlab-com/gl-infra/platform/runway).\n\n\n## More Building GitLab with GitLab\n\n- [Why there is no MLOps without\nDevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)\n\n- [Stress-testing Product\nAnalytics](https://about.gitlab.com/blog/building-gitlab-with-gitlab-stress-testing-product-analytics/)\n\n- [Web API Fuzz\nTesting](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/)\n\n- [How GitLab.com inspired\nDedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)\n\n- [Expanding our security certification\nportfolio](https://about.gitlab.com/blog/building-gitlab-with-gitlab-expanding-our-security-certification-portfolio/)\n","engineering",[109,1080,1081,1082,737,1083,1084,1085,716,9],"CD","CI","inside GitLab","performance","google","git",{"slug":1087,"featured":91,"template":693},"building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","content:en-us:blog:building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","Building Gitlab With Gitlab A Multi Region Service To Deliver Ai 
Features","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"_path":1093,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1094,"content":1100,"config":1106,"_id":1108,"_type":14,"title":1109,"_source":16,"_file":1110,"_stem":1111,"_extension":19},"/en-us/blog/can-your-ci-cd-environment-support-ai-powered-devsecops",{"title":1095,"description":1096,"ogTitle":1095,"ogDescription":1096,"noIndex":6,"ogImage":1097,"ogUrl":1098,"ogSiteName":706,"ogType":707,"canonicalUrls":1098,"schema":1099},"Can your CI/CD environment support AI-powered DevSecOps? ","Unlock the value of AI-powered software development with a DevSecOps platform capable of supporting CI/CD hyperscale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683522/Blog/Hero%20Images/AdobeStock_659839979.jpg","https://about.gitlab.com/blog/can-your-ci-cd-environment-support-ai-powered-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Can your CI/CD environment support AI-powered DevSecOps? \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2024-01-03\",\n      }",{"title":1095,"description":1096,"authors":1101,"heroImage":1097,"date":1103,"body":1104,"category":10,"tags":1105},[1102],"Darren Eastman","2024-01-03","Our customers are experiencing a significant increase in the efficiency and pace of software development with [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI capabilities powering your workflow. This will likely correspond to a rise in the two [DORA metrics](https://about.gitlab.com/solutions/value-stream-management/dora/) that measure velocity: deployment frequency and lead time for changes. 
However, what may not be as obvious is that the age of AI-powered software development has also ushered in a new era of large-scale computing for CI/CD jobs. Organizations will need to learn how to support this CI/CD hyperscale.\n\n## What is CI/CD hyperscale?\n\nAs a quick recap, a [CI/CD](https://about.gitlab.com/topics/ci-cd/) job is a unit of work that is executed on a host computing system each time a developer pushes a code change to a project repository. This core guiding principle has accelerated value creation by software development teams over the past few years. Instead of focusing on the ceremonies of legacy project management, today’s leading software development teams deliver value by adding small increments to a software product regularly – weekly, daily, and even hourly.\n\nThe CI/CD engine supports this modern pattern of software product development by enabling development teams to define automation to continuously build, test, and integrate any new software change. Some of our larger customers who have transformed their software development practices have already attained what we at GitLab have labeled as CI/CD hyperscale. That is, they are typically running 3 million or more CI/CD jobs per month.\n\n## How to support AI-fueled CI/CD growth\n\nWith the advent of AI-powered DevSecOps, we hypothesize that, starting in 2024, organizations will see a 2x increase per year in the number of CI/CD jobs run by development teams leveraging AI-assisted features across the software development lifecycle. 
Starting with 3 million jobs per month as the baseline and assuming a 2% growth rate per month, the chart below illustrates the potential impact in the growth of CI/CD jobs monthly due to efficiency gains with AI-powered DevSecOps.\n\n![cicd hyperscale chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683548/Blog/Content%20Images/image1.png)\n\nSo, what steps can you take to ensure your organization is positioned for success in this hyper-competitive new age of AI-powered DevSecOps? \n\nHere are a few pointers to get you started:\n\n- Analyze whether you have enabled your development teams – the creators of value – with the flexibility to adopt modern patterns in software product development. \n- Inventory the tools you use to support creative work, including project and task management.\n- Inventory the tools you use for software development and DevOps. Are you using multiple types of CI systems across your environment? If so, the next step is to gain a deeper understanding of why that is. \n- Create a plan to migrate all software development teams from multiple point solutions to one DevSecOps platform. \n\nYour software development teams may have built up extensive tooling using various CI systems and point solutions and may question the return on investment of a potentially time-consuming consolidation and migration effort. However, based on our internal data, customers adopting GitLab [realize payback in less than six months](https://about.gitlab.com/blog/why-the-market-is-moving-to-a-platform-approach-to-devsecops/).  \n\nIf you have already adopted the GitLab AI-powered DevSecOps Platform and are consolidating your platform engineering and software development processes, then you are well on your way to unlocking the value of AI-powered software development and having a solution capable of supporting CI/CD hyperscale. 
\n\nAs the pioneer of the integrated DevSecOps platform, we have been at the forefront of supporting CI/CD hyperscale for customers running CI/CD workloads on the fully managed GitLab SaaS CI/CD build environment or their own self-managed build infrastructure. That focused investment over the past decade has resulted in the development of the most scalable and flexible CI/CD engine – the core of the GitLab DevSecOps Platform. Look no further than our milestone of [more than 1 billion pipelines run on GitLab's SaaS-based DevSecOps Platform](https://about.gitlab.com/blog/one-billion-pipelines-cicd/). \n\nSo with GitLab CI and GitLab Runner, the ultimate CI/CD execution engine, the GitLab DevSecOps Platform is a solution that provides the foundation to continuously improve and transform your value creation processes while supporting the scale required to meet the competitive demands of the new age of AI.\n\n## Learn how to achieve CI/CD hyperscale\n\nIf you are new to GitLab and are interested in learning how we can help you transform your software development processes, [contact our sales team](https://about.gitlab.com/sales/) to help you with a custom demo and get you going on your adoption of AI-powered DevSecOps.\n",[109,716,9],{"slug":1107,"featured":6,"template":693},"can-your-ci-cd-environment-support-ai-powered-devsecops","content:en-us:blog:can-your-ci-cd-environment-support-ai-powered-devsecops.yml","Can Your Ci Cd Environment Support Ai Powered 
Devsecops","en-us/blog/can-your-ci-cd-environment-support-ai-powered-devsecops.yml","en-us/blog/can-your-ci-cd-environment-support-ai-powered-devsecops",{"_path":1113,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1114,"content":1120,"config":1126,"_id":1128,"_type":14,"title":1129,"_source":16,"_file":1130,"_stem":1131,"_extension":19},"/en-us/blog/chat-about-your-merge-request-with-gitlab-duo",{"title":1115,"description":1116,"ogTitle":1115,"ogDescription":1116,"noIndex":6,"ogImage":1117,"ogUrl":1118,"ogSiteName":706,"ogType":707,"canonicalUrls":1118,"schema":1119},"Chat about your merge request with GitLab Duo","Learn how to use AI-powered Chat to quickly understand complex merge requests by asking about implementation choices, potential risks, and architectural decisions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675536/Blog/Hero%20Images/blog-image-template-1800x945__2_.png","https://about.gitlab.com/blog/chat-about-your-merge-request-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Chat about your merge request with GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Torsten Linz\"}],\n        \"datePublished\": \"2024-11-22\",\n      }",{"title":1115,"description":1116,"authors":1121,"heroImage":1117,"date":1123,"body":1124,"category":10,"tags":1125},[1122],"Torsten Linz","2024-11-22","Managing a merge request (MR) is an integral part of collaborative development, involving navigating through code changes, discussions, and dependencies to ensure high-quality outcomes. Whether you’re reviewing someone else’s code or trying to make your own changes clearer, the new [GitLab Duo Chat](https://about.gitlab.com/gitlab-duo/) capability, available in GitLab Duo Enterprise, can help simplify your workflow. 
Now, you can have a conversation with GitLab Duo Chat about an MR, directly inside GitLab.\n\n## What GitLab Duo Chat brings to an MR workflow\n\nImagine jumping into a merge request titled \"Add logging to order processing.\" Your goal is to onboard yourself to the MR as quickly as possible and to review it. You can use GitLab Duo Chat to onboard yourself faster and understand critical questions to accelerate your review:\n\n* \"Do the logs cover all failure scenarios, or are there any gaps where an issue might not be traceable?\"  \n* “Are there any potential privacy concerns with the logged data?\"  \n* \"Why was logging added at these specific points in the order processing workflow, and how does it help with debugging or monitoring?\"\n\n![MR context example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675670/Blog/Content%20Images/MR_Context_example.png)\n\nThese are the kinds of questions that GitLab Duo Chat is ready to answer – questions that let you quickly understand the intentions behind the changes and uncover any potential risks before diving into the details. Instead of spending a lot of time trying to follow code paths or waiting on the author to reply to your questions, you can start getting answers right away, saving valuable time.\n\n## In-depth conversations about MRs\n\nThe magic of this new chat capability isn’t just in summarizing code – it’s in its ability to support in-depth conversations about the MR at hand. Let's assume the logging MR also includes notifications and refactoring. You can ask specific, insightful questions, such as:\n\n* “What are the potential network failure points introduced by refactoring the payment service into a microservice?”  \n* \"Were there any trade-offs made in terms of consistency or accuracy for better performance?\"  \n* \"How are failures in sending notifications handled? 
Are retries implemented?\"\n\nInstead of simply telling you what changes have been made, GitLab Duo Chat helps you understand *why* those changes were made, what risks are involved, and how to mitigate them. It lets you dig deep and explore the context behind every line of code, every architectural decision, and every change in behavior within the specific MR you are working on.\n\nAnd it doesn't end with that one answer. You can engage in a follow-up conversation to dig deeper or to explore. \n\n## An evolving conversation tool\n\nWe’re really excited about how GitLab Duo Chat is evolving to become a true conversational partner for MR authors and reviewers alike. GitLab Duo Chat is [aware of the MR description, discussions, the code diff, and metadata of a single MR](https://docs.gitlab.com/ee/user/gitlab_duo_chat/index.html#the-context-chat-is-aware-of). It’s like having an assistant who is well-versed in your MR and ready to explain any part of it – or even rewrite parts, if that’s what you need.\n\nWith GitLab Duo Chat, onboarding yourself to a complex MR or understanding a change in-depth is faster and more intuitive than ever before.\n\n## We need your feedback\n\nWe’re eager to hear how GitLab Duo Chat works for you. All feedback helps us refine this feature and make it even more useful. Please share your experiences by commenting on our [issue tracker](https://gitlab.com/gitlab-org/gitlab/-/issues/464587). Please include the questions you asked, the response you got, and whether it helped you move forward. Together, we can make GitLab Duo Chat an indispensable tool for every merge request!\n\nFor a deeper dive into how to use GitLab Duo Chat, check out our [documentation](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples#ask-about-a-specific-merge-request) or watch our introductory video below. 
Start your first conversation today and let us know what you think!\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4muvSFuWWL4?si=7W4mHWw2iUOzoTUz\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->  \n\n> Sample this new capability with [a free trial of GitLab Ultimate and GitLab Duo Enterprise](https://gitlab.com/-/trials/new).\n\n## Learn more about GitLab Duo Chat\n\n- [GitLab Duo Chat: Get to know productivity-boosting AI enhancements](https://about.gitlab.com/blog/gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements/)\n- [GitLab Duo Chat, your at-the-ready AI assistant, is now generally available](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available/)\n- [GitLab Duo Chat 101: Get more done on GitLab with our AI assistant](https://about.gitlab.com/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant/)",[9,496,717,737,738,759],{"slug":1127,"featured":6,"template":693},"chat-about-your-merge-request-with-gitlab-duo","content:en-us:blog:chat-about-your-merge-request-with-gitlab-duo.yml","Chat About Your Merge Request With Gitlab Duo","en-us/blog/chat-about-your-merge-request-with-gitlab-duo.yml","en-us/blog/chat-about-your-merge-request-with-gitlab-duo",{"_path":1133,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1134,"content":1139,"config":1144,"_id":1146,"_type":14,"title":1147,"_source":16,"_file":1148,"_stem":1149,"_extension":19},"/en-us/blog/code-suggestions-for-all-during-beta",{"title":1135,"description":1136,"ogTitle":1135,"ogDescription":1136,"noIndex":6,"ogImage":868,"ogUrl":1137,"ogSiteName":706,"ogType":707,"canonicalUrls":1137,"schema":1138},"Code Suggestions available to all GitLab tiers while in Beta","All users can acess Code Suggestions AI-assisted feature while it is in Beta.","https://about.gitlab.com/blog/code-suggestions-for-all-during-beta","\n                     
   {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Code Suggestions available to all GitLab tiers while in Beta\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Neha Khalwadekar\"}],\n        \"datePublished\": \"2023-05-16\",\n      }",{"title":1135,"description":1136,"authors":1140,"heroImage":868,"date":1141,"body":1142,"category":10,"tags":1143},[873],"2023-05-16","\n\n\u003Ci>This blog is the latest post in an ongoing series about GitLab’s journey to \u003Ca href=\"/blog/ai-ml-in-devsecops-series/\">build and integrate AI/ML into our DevSecOps platform\u003C/a>. The first blog post can be found \u003Ca href=\"/blog/what-the-ml-ai/\">here\u003C/a>. Throughout the series, we’ll feature blogs from our product, engineering, and UX teams to showcase how we’re infusing AI/ML into GitLab.\u003C/i>\n\nCode Suggestions is now available on GitLab.com for all users for free while the feature is in Beta. Teams can boost efficiency with the help of generative AI that suggests code while you're developing. We've extended language support from our initial six languages to now include 13 languages: C/C++, C#, Go, Java, JavaScript, Python, PHP, Ruby, Rust, Scala, Kotlin, and TypeScript. \n\nWe are making improvements to the Code Suggestions underlying AI model weekly to improve the quality of suggestions. Please remember that AI is non-deterministic, so you may not get the same suggestion week to week.\n\n## Privacy first\nCode Suggestions is built with privacy as a critical foundation. It keeps your proprietary source code secure within GitLab's enterprise cloud infrastructure, and this code isn't used as training data. Source code inference against the Code Suggestions model is not used to re-train the model. Learn about [data usage when using Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#code-suggestions-data-usage). 
\n\n## IDE support\nCode Suggestions is available in VS Code via the [GitLab Workflow extension](https://docs.gitlab.com/ee/user/project/repository/vscode.html#gitlab-workflow-extension-for-vs-code). We will soon support the GitLab WebIDE with GitLab 16.0. We are also working on adding [additional IDE support](https://gitlab.com/groups/gitlab-org/-/epics/10542) based on customer feedback, including JetBrains IntelliJ-based IDEs and Visual Studio support for code suggestions. We are also working to improve the user experience for how suggestions are presented and accepted within the IDEs to give developers more control over how the feature works. Additionally we're working to make it easier to setup Code Suggestions the first time and authenticate with GitLab.com \n\n## Self-managed support\nWe are also working to bring Code Suggestions to self-managed instances [via a secure connection to GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/10528). If you have unique requirements for your self-managed instances, we welcome you to express your interest in our [self-managed support issue](https://gitlab.com/gitlab-org/gitlab/-/issues/409183). Commenting on that issue will give you notifications as we post updates. \n\n## Enable Code Suggestions\nOur documentation details how to [enable Code Suggestions in VS Code](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-in-vs-code). Below is a quickstart video walkthrough:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/WnxBYxN2-p4\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Beta feature\nThis feature is in [Beta](https://docs.gitlab.com/ee/policy/experiment-beta-support.html#beta). 
Code Suggestions uses generative AI to suggest code while you're developing. Due to high demand, this feature will have unscheduled downtime and code suggestions in VS Code may be delayed. Code Suggestions may produce [low-quality or incomplete suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#model-accuracy-and-quality). We look forward to hearing your feedback. Beta users should read about the [known limitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#known-limitations). \n\nWe would love to hear about your experience and report issues in the [feedback issues](https://gitlab.com/gitlab-org/gitlab/-/issues/405152). \n\nCode Suggestions is just one of the ways we’re infusing GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI-assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-powered features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our \"[AI/ML in DevSecOps](https://about.gitlab.com/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information about upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[716,738,717,9],{"slug":1145,"featured":6,"template":693},"code-suggestions-for-all-during-beta","content:en-us:blog:code-suggestions-for-all-during-beta.yml","Code Suggestions For All During Beta","en-us/blog/code-suggestions-for-all-during-beta.yml","en-us/blog/code-suggestions-for-all-during-beta",{"_path":1151,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1152,"content":1157,"config":1162,"_id":1164,"_type":14,"title":1165,"_source":16,"_file":1166,"_stem":1167,"_extension":19},"/en-us/blog/code-suggestions-improves-developer-productivity",{"title":1153,"description":1154,"ogTitle":1153,"ogDescription":1154,"noIndex":6,"ogImage":868,"ogUrl":1155,"ogSiteName":706,"ogType":707,"canonicalUrls":1155,"schema":1156},"How Code Suggestions can supercharge developers' daily productivity","Learn how you can use GitLab Code Suggestions to accelerate your development.","https://about.gitlab.com/blog/code-suggestions-improves-developer-productivity","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Code Suggestions can supercharge developers' daily productivity\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Neha Khalwadekar\"}],\n        \"datePublished\": \"2023-05-25\",\n      }",{"title":1153,"description":1154,"authors":1158,"heroImage":868,"date":1159,"body":1160,"category":10,"tags":1161},[873],"2023-05-25","\n\n\u003Ci>This blog is the latest post in an ongoing series about GitLab’s journey to \u003Ca href=\"/blog/ai-ml-in-devsecops-series/\">build and integrate AI/ML into our DevSecOps platform\u003C/a>. The first blog post can be found \u003Ca href=\"/blog/what-the-ml-ai/\">here\u003C/a>. 
Throughout the series, we’ll feature blogs from our product, engineering, and UX teams to showcase how we’re infusing AI/ML into GitLab.\u003C/i>\n\nIn the fast-paced world of software development, time is a precious resource. Developers constantly strive for ways to improve the productivity and efficiency of their workflows. Enter Code Suggestions, a large language model (LLM)-based technology that can transform the everyday developer experience. Let’s delve into the novel use cases of Code Suggestions, including: \n\n* simplifying operations\n* assisting new developers in language explorations\n* eliminating the need for frequent web searches by experienced developers\n\nAll of these are examples of how Code Suggestions can accelerate the daily developer experience. Let’s explore some specific examples of these use cases.\n\n## Import packages\nWith Code Suggestions, developers can quickly complete mundane tasks like importing Python packages. \n![Example](https://about.gitlab.com/images/blogimages/2023-05-25-Blog-code-suggestions-improves-developer-productivity/python_packages.gif)\n\n\n## Complete functions\nCode Suggestions can help developers complete functions and use those functions to write code. In the example below, we are defining the first and last name and then defining a full name. Now we can take this a step forward and use those defined functions in a user form. \n![Example](https://about.gitlab.com/images/blogimages/2023-05-25-Blog-code-suggestions-improves-developer-productivity/sample_functions.gif)\n\n\n## Fill in boilerplate\nDevelopers can use Code Suggestions to recommend boilerplate code such as connecting to a mySQL database.\n![Example](https://about.gitlab.com/images/blogimages/2023-05-25-Blog-code-suggestions-improves-developer-productivity/mysql-boiler.gif)\n\n\n## Building data frames\nData manipulation is a fundamental task for developers working with structured data. 
Code Suggestions can simplify the process of offering intelligent recommendations for DataFrame operations. Code Suggestions can assist in saving developers the time and effort of searching through documentation or experimenting with trial and error.\n![Example](https://about.gitlab.com/images/blogimages/2023-05-25-Blog-code-suggestions-improves-developer-productivity/dataframe.gif)\n\n\n## Generate unit tests\nWith Code Suggestions, developers can quickly write unit tests for the supported programming languages.\n![Example](https://about.gitlab.com/images/blogimages/2023-05-25-Blog-code-suggestions-improves-developer-productivity/unit-test.gif)\n\n\n## Try Code Suggestions today\nCode Suggestions is now available for free on GitLab.com for all users while the feature is in Beta. Teams can boost efficiency with the help of generative AI that suggests code while they're developing. We are improving the underlying AI model weekly to improve the [quality of suggestions](https://gitlab.com/groups/gitlab-org/-/epics/10562). Please remember that AI is non-deterministic, so you may not get the same suggestion from week to week. Also remember that any time you are using AI-generated code you should be automatically analyzing it with [code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html) and [security scanning](https://docs.gitlab.com/ee/user/application_security/), both of which are available natively in the GitLab platform. \n\nWe’ve extended [language support](https://gitlab.com/groups/gitlab-org/-/epics/10561) from our initial six languages to now include 13 languages: C/C++, C#, Go, Java, JavaScript, Python, PHP, Ruby, Rust, Scala, Kotlin, and TypeScript.\n\nRead more about these [improvements and what’s next.](https://about.gitlab.com/blog/code-suggestions-for-all-during-beta/)\n\nInterested in using these AI-powered features? 
[Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our \"[AI/ML in DevSecOps](https://about.gitlab.com/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information about upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[716,738,717,9],{"slug":1163,"featured":6,"template":693},"code-suggestions-improves-developer-productivity","content:en-us:blog:code-suggestions-improves-developer-productivity.yml","Code Suggestions Improves Developer Productivity","en-us/blog/code-suggestions-improves-developer-productivity.yml","en-us/blog/code-suggestions-improves-developer-productivity",{"_path":1169,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1170,"content":1176,"config":1183,"_id":1185,"_type":14,"title":1186,"_source":16,"_file":1187,"_stem":1188,"_extension":19},"/en-us/blog/continuous-machine-learning-development-with-gitlab-ci",{"title":1171,"description":1172,"ogTitle":1171,"ogDescription":1172,"noIndex":6,"ogImage":1173,"ogUrl":1174,"ogSiteName":706,"ogType":707,"canonicalUrls":1174,"schema":1175},"How machine learning ops works with GitLab and continuous machine learning","We share different machine learning use cases for CML projects using GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681802/Blog/Hero%20Images/gitlab_cml_dvc_banner.png","https://about.gitlab.com/blog/continuous-machine-learning-development-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How 
machine learning ops works with GitLab and continuous machine learning\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dr. Elle O'Brien\"}],\n        \"datePublished\": \"2020-12-01\",\n      }",{"title":1171,"description":1172,"authors":1177,"heroImage":1173,"date":1179,"body":1180,"category":1181,"tags":1182},[1178],"Dr. Elle O'Brien","2020-12-01","Continuous integration (CI) is standard practice in software development for\nspeeding up development cycles, and for keeping them short and painless. CI\nmeans making small commits, often, and automating tests so every commit is a\nrelease candidate.\n\n\nWhen a project involves machine learning (ML), though, new challenges arise:\nTraditional [version control systems](/topics/version-control/) (like Git)\nthat are key to CI struggle to manage large datasets and models.\n\nFurthermore, typical pass-fail tests are too coarse for understanding ML\nmodel performance – you might need to consider how several metrics, like\naccuracy, sensitivity, and specificity, are affected by changes in your code\nor data.\n\nData visualizations like confusion matrices and loss plots are needed to\nmake sense of the high-dimensional and often unintuitive behavior of models.\n\n\n## Continuous machine learning: an introduction\n\n\n[Iterative.ai](https://iterative.ai), the team behind the popular open\nsource version control system for ML projects [DVC](https://dvc.org) (short\nfor Data Version Control),\n\nhas recently released another open source project called\n[CML](https://cml.dev), which stands for continuous machine learning.\n\nCML is our approach to adapting powerful CI systems like GitLab CI to common\ndata science and ML use cases, including:\n\n\n- Automatic model training\n\n- Automatic model and dataset testing\n\n- Transparent and rich reporting about models and datasets (with data viz\nand metrics) in a merge request (MR)\n\n\n## Your first continuous machine learning report\n\n\nCML helps you put tables, data 
viz, and even sample outputs from models into\ncomments on your MRs, so you can review datasets and models like code.\n\nLet's see how to produce a basic report – we'll train an ML model using\nGitLab CI, and then report a model metric and confusion matrix in our MR.\n\n\n![Confusion\nMatrix](https://about.gitlab.com/images/blogimages/cml_confusion_matrix.jpg){:\n.shadow.medium.center}\n\nConfusion matrix\n\n{: .note.text-center}\n\n\nTo make this report, our `.gitlab-ci.yml` contains the following workflow:\n\n\n```\n\n# .gitlab-ci.yml\n\nstages:\n    - cml_run\n\ncml:\n    stage: cml_run\n    image: dvcorg/cml-py3:latest\n\n    script:\n        - pip3 install -r requirements.txt\n        - python train.py\n\n        - cat metrics.txt >> report.md\n        - echo >> report.md\n        - cml-publish confusion_matrix.png --md --title 'confusion-matrix' >> report.md\n        - cml-send-comment report.md\n\n```\n\n\nThe entire [project repository is available\nhere](https://gitlab.com/iterative.ai/cml-base-case/).\n\nThe steps consist of the following:\n\n\n- **Train**: This is a classic training step where we install requirements\n(like `pip` packages) and run the training script.\n\n- **Write a CML report**: Produced metrics are appended to a markdown\nreport.\n\n- **Publish a CML report**: CML publishes an image of the confusion matrix\nwith the embedded metrics to your GitLab MR.\n\n\nNow, when you and your teammates are deciding if your changes have had a\npositive effect on your modeling goals,\n\nyou have a dashboard of sorts to review. Plus, this report is linked by Git\nto your exact project version (data and code) and the runner used for\ntraining and the logs from that run.\n\n\nThis is the simplest use case for achieving continuous machine learning with\nCML and GitLab. 
In the next section we'll look at a more complex use case.\n\n\n## CML with DVC for data version control\n\n\nIn machine learning projects, you need to track changes in your datasets as\nwell as changes in your code.\n\nSince Git is frequently a poor fit for managing large files, we can use\n[DVC](https://dvc.org) to link remote datasets to your CI system.\n\n\n```\n\n# .gitlab-ci.yml\n\nstages:\n  - cml_run\n\ncml:\n  stage: cml_run\n  image: dvcorg/cml-py3:latest\n  script:\n    - dvc pull data\n\n    - pip install -r requirements.txt\n    - dvc repro\n\n    # Compare metrics to master\n    - git fetch --prune\n    - dvc metrics diff --show-md master >> report.md\n    - echo >> report.md\n\n    # Visualize loss function diff\n    - dvc plots diff\n      --target loss.csv --show-vega master > vega.json\n    - vl2png vega.json | cml-publish --md >> report.md\n    - cml-send-comment report.md\n```\n\n\nThe entire [project is available\nhere](https://gitlab.com/iterative.ai/cml-dvc-case).\n\nIn this workflow, we have additional steps that use DVC to pull a training\ndataset, run an experiment, and then use CML to publish the report in your\nMR.\n\n\n![CML with DVC](https://about.gitlab.com/images/blogimages/cml_dvc.jpg){:\n.shadow.medium.center}\n\nCML with DVC\n\n{: .note.text-center}\n\n\nFor more details about ML data versioning and tracking, check out the [DVC\ndocumentation](https://dvc.org/doc).\n\n\n## Summary\n\n\nWe made CML to adapt CI to machine learning, so data science teams can enjoy\nbenefits such as:\n\n\n- Your code, data, models, and training infrastructure (hardware and\nsoftware environment) will be Git versioned.\n\n- You’re automating work, testing frequently, and getting fast feedback\n(with visual reports if you use CML). In the long run, this will almost\ncertainly speed up your project’s development.\n\n- CI systems make your work visible to everyone on your team. 
No one has to\nsearch very hard to find the code, data, and model from your best run.\n\n\n### About the guest author\n\n\n_Dr. Elle O'Brien is a Ph.D data scientist at iterative.ai and co-creator of\n[CML](https://cml.dev) project. She is also a lecturer at\n[UMSI](https://www.si.umich.edu/)._\n","news",[109,269,233,896,9],{"slug":1184,"featured":6,"template":693},"continuous-machine-learning-development-with-gitlab-ci","content:en-us:blog:continuous-machine-learning-development-with-gitlab-ci.yml","Continuous Machine Learning Development With Gitlab Ci","en-us/blog/continuous-machine-learning-development-with-gitlab-ci.yml","en-us/blog/continuous-machine-learning-development-with-gitlab-ci",{"_path":1190,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1191,"content":1197,"config":1203,"_id":1205,"_type":14,"title":1206,"_source":16,"_file":1207,"_stem":1208,"_extension":19},"/en-us/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci",{"title":1192,"description":1193,"ogTitle":1192,"ogDescription":1193,"noIndex":6,"ogImage":1194,"ogUrl":1195,"ogSiteName":706,"ogType":707,"canonicalUrls":1195,"schema":1196},"Develop C++ unit testing with Catch2, JUnit, and GitLab CI","Learn how to set up, write, and automate C++ unit tests using Catch2 with GitLab CI/CD. 
See examples from a working air quality app project and AI-powered help from GitLab Duo.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659684/Blog/Hero%20Images/AdobeStock_479904468__1_.jpg","https://about.gitlab.com/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Develop C++ unit testing with Catch2, JUnit, and GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2024-07-02\"\n      }",{"title":1192,"description":1193,"authors":1198,"heroImage":1194,"date":1199,"body":1200,"category":1201,"tags":1202},[711],"2024-07-02","Continuous integration (CI) and automated testing are important DevSecOps workflows for software developers to detect bugs early, improve code quality, and streamline their development processes. \n\nIn this tutorial, you'll learn how to set up unit testing on a `C++` project with [Catch2](https://github.com/catchorg/Catch2) and GitLab CI for continuous integration. You'll also see how the AI-powered features of [GitLab Duo](https://about.gitlab.com/gitlab-duo/) can help. We’ll use [an air quality monitoring application](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/air-quality-app) as our reference project.\n\n## Prerequisites\n\n- Ensure you have [CMake](https://cmake.org/ \"CMake\") installed on your machine. \n- A modern `C++` compiler such as GCC or Clang is required. \n- An API key from [OpenWeatherMap](https://openweathermap.org/api) - requires signing up for a free account (1,000 calls per day are included for free). 
\n\n## Set up the application for testing\n\nThe reference project we’ll be using for demonstrating testing in this blog post is an air quality monitoring application that fetches air quality data from the OpenWeatherMap API based on the U.S zip codes only provided by the user.\n\nHere are the steps to set up the application for testing:\n\n1. Fork the [the reference project](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/air-quality-app) and clone the fork to your local environment.\n\n2. Generate an API key from  [OpenWeatherMap](https://openweathermap.org/) and export it into the environment. \n\n```shell\nexport API_KEY=\"YOURAPIKEY_HERE\"\n```\n\n3. Alternatively, you can add the key into your `.env` configuration, and source it with `source ~/.env`, or use a different mechanism to populate the environment.\n\n4. Compile and build the project code with the following instructions:\n\n```cpp\ncmake -S . -B build\ncmake --build build\n```\n\n5. Run the application using the executable and passing in a U.S zip code (90210 as an example): \n\n```cpp\n./build/air_quality_app 90210\n```\n\nHere’s an example of what running the program will look like in your terminal:  \n\n```bash\n❯ ./build/air_quality_app 90210\nAir Quality Index (AQI) for Zip Code 90210: 2 (Fair)\n```\n\n## Install Catch2\n\nNow that the application is set up and working, let's start working on adding testing using Catch2. Catch2 is a modern, `C++-native` testing framework for unit tests. \n\nYou can also ask GitLab Duo Chat within your IDE for an introduction to getting started with Catch2 as a `C++` testing framework. GitLab Duo Chat will provide getting started steps as well as an example test: \n\n![GitLab Duo Chat starting steps and example test](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676997/Blog/Content%20Images/1.duo-chat-installing-catch2.png)\n\n1. 
First navigate to your project’s root directory and create an externals folder using the `mkdir` command.\n\n```shell\nmkdir externals\n```\n\n2. There are several ways to install Catch2 via [its CMake integration](https://github.com/catchorg/Catch2/blob/devel/docs/cmake-integration.md#top). We will use the option of installing it as a submodule and including it as part of the source code to simplify dependency management. To add Catch2 to your project in the `externals` folder: \n\n```shell\ngit submodule add https://github.com/catchorg/Catch2.git externals/Catch2\ngit submodule update --init --recursive\n```\n\n3. Update `CMakeLists.txt` to include Catch2’s directory as a subdirectory. This allows CMake to find and build Catch2 as a part of our project. \n\n```cpp\n# Assuming Catch2 in externals/Catch2\nadd_subdirectory(externals/Catch2)\n```\n\n4. Create a `tests.cpp` file in your project root to write our tests to: \n\n```shell\ntouch tests.cpp\n```\n\n5. Update `CMakeLists.txt` to link against Catch2. When defining your test executable in CMake, link it against Catch2:\n\n```cpp\n# Add tests executable and link it to Catch2\nadd_executable(tests tests.cpp)\ntarget_link_libraries(tests PRIVATE Catch2::Catch2WithMain)\n```\n\n## Structure the project for testing\n\nBefore we start writing our tests, we should separate our application logic into separate files in order to maintain and test our code more efficiently. At the end of this section we should have:\n\n```\nmain.cpp containing only the main() function and application setup\nincludes/functions.cpp containing all functional code such as API calls and data processing: \nincludes/functions.h containing the declarations for the functions defined in functions.cpp.  It needs to define the preprocessor macro guards, and include all necessary headers. \n```\n\nApply the following changes to the files: \n\n1. 
`main.cpp`\n\n```cpp\n#include \u003Ciostream>\n#include \"functions.h\"\n\nint main(int argc, char* argv[]) {\n   if (argc \u003C 2) {\n       std::cerr \u003C\u003C \"Usage: \" \u003C\u003C argv[0] \u003C\u003C \" \u003CZip Code>\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   std::string zipCode = argv[1];\n   std::string apiKey = getApiKey();\n   if (apiKey.empty()) {\n       std::cerr \u003C\u003C \"API key not found.\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   auto [lat, lon] = geocodeZipcode(zipCode, apiKey);\n   if (lat == 0 && lon == 0) {\n       std::cerr \u003C\u003C \"Failed to geocode zipcode.\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   std::string response = fetchAirQuality(lat, lon, apiKey);\n   std::string airQualityInfo = parseAirQualityResponse(response);\n\n   std::cout \u003C\u003C \"Air Quality Index for Zip Code \" \u003C\u003C zipCode \u003C\u003C \": \" \u003C\u003C airQualityInfo \u003C\u003C std::endl;\n\n   return 0;\n}\n```\n\n2. Create a `functions.h:` in the `includes` folder: \n\n```cpp\n#ifndef FUNCTIONS_H\n#define FUNCTIONS_H\n\n#include \u003Cstring>\n#include \u003Cutility>\n#include \u003Cvector>\n\n// Declare the function prototype\nstd::string httpRequest(const std::string& url);\nbool loadEnvFile(const std::string& filename);\nstd::string getApiKey();\nstd::pair\u003Cdouble, double> geocodeZipcode(const std::string& zipCode, const std::string& apiKey);\nstd::string fetchAirQuality(double lat, double lon, const std::string& apiKey);\nstd::string parseAirQualityResponse(const std::string& response);\n\n#endif\n```\n\n3. 
Create a `functions.cpp` in the `includes` folder: \n\n```cpp\n#include \"functions.h\"\n#include \u003Cfstream>\n#include \u003Celnormous/HTTPRequest.hpp>\n#include \u003Cnlohmann/json.hpp>\n#include \u003Ciostream>\n#include \u003Ccstdlib> // For getenv\n\nstd::string httpRequest(const std::string& url) {\n   try {\n       http::Request request{url};\n       const auto response = request.send(\"GET\");\n       return std::string{response.body.begin(), response.body.end()};\n   } catch (const std::exception& e) {\n       std::cerr \u003C\u003C \"Request failed, error: \" \u003C\u003C e.what() \u003C\u003C std::endl;\n       return \"\";\n   }\n}\nstd::string getApiKey() {\n   const char* envApiKey = std::getenv(\"API_KEY\");\n   if (envApiKey) {\n       return std::string(envApiKey);\n   }\n   // If the environment variable is not set, fallback to the config file\n   std::ifstream configFile(\"config.txt\");\n   std::string line;\n   if (getline(configFile, line)) {\n       return line.substr(line.find('=') + 1);\n   }\n   return \"\";\n}\n\nstd::pair\u003Cdouble, double> geocodeZipcode(const std::string& zipCode, const std::string& apiKey) {\n   std::string url = \"http://api.openweathermap.org/geo/1.0/zip?zip=\" + zipCode + \",US&appid=\" + apiKey;\n   std::string response = httpRequest(url);\n   try {\n       auto json = nlohmann::json::parse(response);\n       if (json.contains(\"lat\") && json.contains(\"lon\")) {\n           double lat = json[\"lat\"];\n           double lon = json[\"lon\"];\n           return {lat, lon};\n       } else {\n           std::cerr \u003C\u003C \"Geocode response missing 'lat' or 'lon' fields: \" \u003C\u003C response \u003C\u003C std::endl;\n       }\n   } catch (const nlohmann::json::parse_error& e) {\n       std::cerr \u003C\u003C \"Failed to parse geocode response: \" \u003C\u003C e.what() \u003C\u003C \" - Response: \" \u003C\u003C response \u003C\u003C std::endl;\n   }\n   return {0, 0};\n}\n\nstd::string 
fetchAirQuality(double lat, double lon, const std::string& apiKey) {\n   std::string url = \"http://api.openweathermap.org/data/2.5/air_pollution?lat=\" + std::to_string(lat) + \"&lon=\" + std::to_string(lon) + \"&appid=\" + apiKey;\n   std::string response = httpRequest(url);\n   return response;\n}\n\nstd::string parseAirQualityResponse(const std::string& response) {\n   try {\n       auto json = nlohmann::json::parse(response);\n       if (json.contains(\"list\") && !json[\"list\"].empty() && json[\"list\"][0].contains(\"main\")) {\n           int aqi = json[\"list\"][0][\"main\"][\"aqi\"];\n           std::string aqiCategory;\n           switch (aqi) {\n               case 1:\n                   aqiCategory = \"Good\";\n                   break;\n               case 2:\n                   aqiCategory = \"Fair\";\n                   break;\n               case 3:\n                   aqiCategory = \"Moderate\";\n                   break;\n               case 4:\n                   aqiCategory = \"Poor\";\n                   break;\n               case 5:\n                   aqiCategory = \"Very Poor\";\n                   break;\n               default:\n                   aqiCategory = \"Unknown\";\n                   break;\n           }\n           return std::to_string(aqi) + \" (\" + aqiCategory + \")\";\n       } else {\n           return \"No AQI data available\";\n       }\n   } catch (const std::exception& e) {\n       std::cerr \u003C\u003C \"Failed to parse JSON response: \" \u003C\u003C e.what() \u003C\u003C std::endl;\n       return \"Error parsing AQI data\";\n   }\n}\n\n```\n\n4. 
Now that we have separated the source files, we also need to update our `CMakeLists.txt` to include `functions.cpp` in the `add_executable()` calls:\n\n```cpp\ncmake_minimum_required(VERSION 3.14)\nproject(air-quality-app)\n\n# Set the C++ standard for the project\nset(CMAKE_CXX_STANDARD 17)\nset(CMAKE_CXX_STANDARD_REQUIRED ON)\nset(CMAKE_CXX_EXTENSIONS OFF)\n\ninclude_directories(${CMAKE_SOURCE_DIR}/includes)\n\n# Define the main program executable\nadd_executable(air_quality_app main.cpp includes/functions.cpp)\n\n# Assuming Catch2 in externals/Catch2\nadd_subdirectory(externals/Catch2)\n\n# Add tests executable and link it to Catch2\nadd_executable(tests tests.cpp includes/functions.cpp)\ntarget_link_libraries(tests PRIVATE Catch2::Catch2WithMain)\n```\n\nTo verify that the changes are working, regenerate the CMake configuration and rebuild the source code with the following commands. The build will take longer now that we're compiling Catch2 files. \n\n```shell\nrm -rf build # delete existing build files\ncmake -S . -B build \ncmake --build build  \n```\n\nYou should be able to run the application without any errors.\n\n```shell\n./build/air_quality_app 90210\n```\n\n## Write tests in Catch2  \n\nCatch2 tests are made up of [macros and assertions](https://github.com/catchorg/Catch2/blob/devel/docs/assertions.md). Macros in Catch2 are used to define test cases and sections within those test cases. They help in organizing and structuring the tests. Assertions are used to verify that the code behaves as expected. If an assertion fails, the test case will fail, and Catch2 will report the failure.\n\nLet’s review a basic test scenario for an addition function to understand. Note: This test is read-only, as an example. 
\n\n```cpp\nint add(int a, int b) {\n   return a + b;\n}\n\nTEST_CASE(\"Addition works correctly\", \"[math]\") {\n   REQUIRE(add(1, 1) == 2);  // Test passes if 1+1 equals 2\n   REQUIRE(add(2, 2) != 5);  // Test passes if 2+2 does not equal 5\n}\n```\n\n- Each test begins with the `TEST_CASE` macro, which defines a test case container. The macro accepts two parameters: a string describing the test case and optionally a second string for tagging the test for easy filtering.\n- Tests are also composed of assertions, which are statements that check if conditions are true. Catch2 provides macros for assertion that include `REQUIRE`, which aborts the current test if the assertion fails, and `CHECK`, which logs the failure but continues with the current test.\n\n### Prepare to write tests with Catch2\n\nTo test the API retrieval functions in our air quality application, we’ll be using mock API requests. Mock API testing is a technique used to test how your application will interact with an external API without making any real API calls. Instead of sending requests to a live API server, we can simulate the responses using predefined data. Mock requests allow us to control the input data and specify exactly what the API would return for different requests, making sure that our tests aren't affected by changes in the real API responses or unexpected data. This also makes it easier for us to simulate and catch different failures.\n\nIn our `tests.cpp` file, let’s define the following function to run mock API requests.   
\n\n```cpp\n#include \"includes/functions.h\"\n#include \u003Ccatch2/catch_test_macros.hpp>\n#include \u003Cstring>\n\n// Mock HTTP request function that simulates API responses\nstd::string mockHttpRequest(const std::string& url) {\n   if (url.find(\"geo\") != std::string::npos) {\n       // Mock response for geocoding\n       return R\"({\"lat\": 40.7128, \"lon\": -74.0060})\"; \n   } else if (url.find(\"air_pollution\") != std::string::npos) {\n       // Mock response for air quality\n       return R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   }\n   // Default mock response for unmatched endpoints\n   return \"{}\";\n}\n// Overriding the actual httpRequest function with the mockHttpRequest for testing\nstd::string httpRequest(const std::string& url) {\n   return mockHttpRequest(url);\n}\n```\n\n- This function simulates HTTP requests and returns predefined JSON responses based on the URL given as input. \n- It also checks the URL to determine which type of data is being requested based on the functionality of the application (geocoding, air pollution, or forecast data). If the URL doesn’t match the expected endpoint, it returns an empty JSON object. \n\nDon't compile the code just yet, as you'll see a linker error. Since we're overriding the original `httpRequest` function with our mock function for testing, we'll need a preprocessor macro to enable conditional compilation - indicating which `httpRequest` function should run when we're compiling tests. \n\n#### Define a preprocessor macro for testing  \n\nBecause we’ve overridden `httpRequest` in our `tests.cpp`, we need to exclude that code from `functions.cpp` when we’re testing. When building tests, we may need to ensure that certain parts of our code behave differently or are excluded. 
We can do this by defining a preprocessor macro `TESTING` which enables conditional compilation, allowing us to selectively include or exclude code when compiling the test target:  \n\nWe define the `TESTING` macro in our `CMakeLists.txt` at the end:  \n\n```cpp\n# Define TESTING macro for this target\ntarget_compile_definitions(tests PRIVATE TESTING)\n```\n\nAnd add the macro wrapper in  `functions.cpp` around the original `httpRequest` function:  \n\n```cpp\n#ifndef TESTING  // Exclude this part when TESTING is defined\nstd::string httpRequest(const std::string& url) {\n   try {\n       http::Request request{url};\n       const auto response = request.send(\"GET\");\n       return std::string{response.body.begin(), response.body.end()};\n   } catch (const std::exception& e) {\n       std::cerr \u003C\u003C \"Request failed, error: \" \u003C\u003C e.what() \u003C\u003C std::endl;\n       return \"\";\n   }\n}\n#endif\n```\n\nRegenerate the CMake configuration and rebuild the source code to verify it works.\n\n```shell\ncmake --build build  \n```\n\n### Write the first tests \n\nNow, let’s write some tests for our air quality application.\n\n#### Test 1: Verify API key retrieval \n\nThis test ensures that the `getApiKey` function retrieves the API key correctly from the environment variable or the configuration file. Add the test case to our `tests.cpp`:\n\n```cpp\n\nTEST_CASE(\"API Key Retrieval\", \"[api]\") {\n   // Set the API_KEY environment variable for testing\n   setenv(\"API_KEY\", \"test_key\", 1);\n   // Test if the key is retrieved correctly\n   REQUIRE(getApiKey() == \"test_key\");\n}\n```\n\nYou can verify that this tests passes by rebuilding the code and running the tests:\n\n```shell\ncmake --build build\n./build/tests\n```\n\n#### Test 2: Geocode the zip code\n\nThis test ensures that the `geocodeZipcode` function returns the correct latitude and longitude for a given zip code using the mock API response function we set up earlier. 
The  `geocodeZipcode` function is supposed to hit an API that returns geographic coordinates based on a zip code. \n\nIn `tests.cpp`, add this test case for the zip code 90210: \n\n```cpp\nTEST_CASE(\"Geocode Zip code\", \"[geocode]\") {\n   std::string apiKey = \"test_key\";\n   std::pair\u003Cdouble, double> coordinates = geocodeZipcode(\"90210\", apiKey);\n   // Check latitude\n   REQUIRE(coordinates.first == 40.7128);\n   // Check longitude \n   REQUIRE(coordinates.second == -74.0060);\n}\n```\n\nThe purpose of this test is to verify that the function `geocodeZipcode` can correctly parse the latitude and longitude from the API response. By hardcoding the expected response, we ensure that the test environment is controlled and predictable.\n\n #### Test 3: Air quality API test\n\nThis test ensures that the `fetchAirQuality` function correctly fetches air quality data using the mock API response function we set up earlier. It verifies that the function constructs the API request properly, sends it, and accurately parses the air quality index (AQI) from the mock JSON response. This validation helps ensure that the overall process of fetching and interpreting air quality data works as intended.\n\n```cpp\nTEST_CASE(\"Fetch Air Quality\", \"[airquality]\") {\n   std::string apiKey = \"test_key\";\n   double lat = 40.7128;\n   double lon = -74.0060;\n   std::string response = fetchAirQuality(lat, lon, apiKey);\n   // Check the response\n   REQUIRE(response == R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\");\n}\n```\n\n## Build and run the tests\n\nTo  build and compile our application, we'll use the same CMake commands as before:\n\n```cpp\ncmake -S . 
-B build\ncmake --build build\n\n```\n\nAfter building, we can run our tests by executing the test binary:  \n\n```cpp\n./build/tests\n\n```\n\nRunning this command will execute all defined tests, and you will see output indicating whether each test has passed or failed.\n\n![Output showing pass/fail of tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.running-catch2-tests.png)\n\n## Set up GitLab CI/CD\n\nTo automate the testing process each time we push some new code to our repository, let’s set up [GitLab CI/CD](https://about.gitlab.com/topics/ci-cd/). Create a new `.gitlab-ci.yml` configuration file in the root directory. \n\n```yaml\nimage: gcc:latest\n\nvariables:\n GIT_SUBMODULE_STRATEGY: recursive\n\nstages:\n - build\n - test\n\nbefore_script:\n - apt-get update && apt-get install -y cmake\n\ncompile:\n stage: build\n script:\n   - cmake -S . -B build\n   - cmake --build build\n artifacts:\n   paths:\n     - build/\n\ntest:\n stage: test\n script:\n   - ./build/tests --reporter junit -o test-results.xml\n artifacts:\n   reports:\n     junit: test-results.xml\n```\n\nThis CI/CD configuration will compile both the main application and the test suite, then run the tests, generating a JUnit XML report which GitLab uses to display the test results.  \n\n- In `before_script`, we added an installation for `cmake`, and `git submodule sync --recursive` which initializes and updates our submodules (catch2). \n- In the `test` stage, `--reporter junit -o test-results.xml` specifies that the test results should be treated as a JUnit report which allows GitLab CI to display results in the UI. This is super helpful when you have several tests in your application.  
\n\nWe also need to [add an environmental variable](https://docs.gitlab.com/ee/ci/variables/#define-a-cicd-variable-in-the-ui) with the `API_KEY` in project settings on GitLab.\n\nDon’t forget to add all new files to Git, and commit and push the changes in a new MR:\n\n```shell\ngit checkout -b tests-catch2-cicd\n\ngit add includes/functions.{h,cpp} tests.cpp .gitlab-ci.yml \ngit add CMakeLists.txt main.cpp \n\ngit commit -vm “Add Catch2 tests and CI/CD configuration”\ngit push \n```\n\n## View the test report\n\nAfter pushing our code changes, we can review the results of our tests in the GitLab UI in the Pipeline view in the `Tests` tab:\n\n![GitLab pipeline view shows test results](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.0-passed-tests-UI.png)\n\n## Simulate a test failure\n\nTo demonstrate how our UI will handle test failures, we can intentionally introduce a bug into our code and observe the resulting behavior. \n\nLet's modify our `parseAirQualityResponse` function to introduce an error. We can change the AQI category for an AQI value of 2 from \"Fair\" to \"Poor.\" This change will cause the related test to fail, allowing us to see the test failure in the GitLab UI.\n\nIn `functions.cpp`, find the `parseAirQualityResponse` function and modify the switch statement for case `2` to set the `Poor` value instead of `Fair`:\n\n```cpp\n               // Intentional bug:\n               case 2:\n                   aqiCategory = \"Poor\";\n                   break;\n```\n\nIn tests.cpp, add a new test case that directly checks the output of the `parseAirQualityResponse` function. This test ensures that the `parseAirQualityResponse` function correctly parses and categorizes the air quality data from the mock API response. 
This function takes a JSON response, extracts the AQI value, and translates it into a human-readable category.\n\n```cpp\n\nTEST_CASE(\"Parse Air Quality Response\", \"[airquality]\") {\n   std::string mockResponse = R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   std::string result = parseAirQualityResponse(mockResponse);\n   // This should fail due to the intentional bug\n   REQUIRE(result == \"2 (Fair)\");\n}\n\n```\n\nCommit the changes, and push them into the MR. Open the MR in your browser. \n\nBy introducing an intentional bug in this function, we can see how a test failure is reported in GitLab's pipelines UI. We must add, commit, and push the changes to our repository to view the test failure in the pipeline. \n\n![Simulated test failure](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.1-failed-test-simulation.png)\n\n![Details of the simulated failed test](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.2-failed-test-simulation-details.png)\n\nOnce we've verified this simulated test failure, we can use `git revert` to roll back that commit. \n\n```shell\ngit revert HEAD\n```\n\n## Add and test a new feature\n\nLet’s put what you've learned together by creating a new feature in the air quality application and then writing a test for that feature using Catch2. The new feature will fetch the current weather forecast for the provided zip code.\n\nFirst, we'll define a `Weather` struct and add the function prototype in our `functions.h` file (before the `#endif`):\n\n```cpp\n\nstruct Weather {\n   std::string main;\n   std::string description;\n   double temperature;\n};\n\nWeather getCurrentWeather(const std::string& apiKey, double lat, double lon);\n```\n\nThen, we implement the `getCurrentWeather` function in `functions.cpp`. This function calls the OpenWeatherMap API to retrieve the current weather and parses the JSON response. 
This code was generated using [GitLab Duo](https://about.gitlab.com/gitlab-duo/). If you start typing `Weather getCurrentWeather(const std::string& apiKey, double lat, double lon) {` to complete the function, GitLab Duo will provide the function contents for you, line by line. \n\n![GitLab Duo completing the function contents](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/3.get-current-weather-function-completion.png)\n\nHere's what your `getCurrentWeather()` function can look like: \n\n```cpp\n\nWeather getCurrentWeather(const std::string& apiKey, double lat, double lon) {\n   std::string url = \"http://api.openweathermap.org/data/2.5/weather?lat=\" + std::to_string(lat) + \"&lon=\" + std::to_string(lon) + \"&appid=\" + apiKey;\n   std::string response = httpRequest(url);\n   auto json = nlohmann::json::parse(response);\n   Weather weather;\n   if (!json.is_null()) {\n       weather.main = json[\"weather\"][0][\"main\"];\n       weather.description = json[\"weather\"][0][\"description\"];\n       weather.temperature = json[\"main\"][\"temp\"];\n   }\n   return weather;\n}\n```\n\nAnd, finally, we update our `main.cpp` file in the main function to output the current forecast (and converting Kelvin to Celsius for the output):  \n\n```cpp\n   Weather currentWeather = getCurrentWeather(apiKey, lat, lon);\n   if (currentWeather.main.empty()) {\n       std::cerr \u003C\u003C \"Failed to fetch current weather.\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   std::cout \u003C\u003C \"Current Weather: \" \u003C\u003C currentWeather.main \u003C\u003C \", \" \u003C\u003C currentWeather.description\n       \u003C\u003C \", temperature \" \u003C\u003C currentWeather.temperature - 273.15 \u003C\u003C \" °C\" \u003C\u003C std::endl;\n```\n\nWe can confirm that our new feature is working by building and running the application:  \n\n```shell\ncmake --build build\n./build/air_quality_app \n```\n\nAnd we should see the following 
output or similar in case the weather is different on the day the code is run :)\n\n```\nAir Quality Index for Zip Code 90210: 2 (Poor)\nCurrent Weather: Clouds, broken clouds, temperature 23.2 °C\n```\n\nWith all new functionality, there should be testing! We can also write a test to check whether the application is fetching and parsing a weather forecast correctly. This test checks that the function returns a list containing the correct number of forecast entries and that each entry has accurate data regarding time and temperature.\n\n```cpp\nTEST_CASE(\"Current Weather functionality\", \"[api]\") {\n   auto weather = getCurrentWeather(\"dummyApiKey\", 40.7128, -74.0060);\n   // Ensure main weather description is not empty\n   REQUIRE_FALSE(weather.main.empty());\n   // Validate that temperature is a reasonable value\n   REQUIRE(weather.temperature > 0); \n}\n```\n\nWe’ll also have to update our `mockHTTPRequest` function in `tests.cpp` to account for this new test. Modify the if-condition with a new else-if branch checking for the `weather` string in the URL:  \n\n```cpp\n// Mock HTTP request function that simulates API responses\nstd::string mockHttpRequest(const std::string &url)\n{\n   if (url.find(\"geo\") != std::string::npos)\n   {\n       // Mock response for geocoding\n       return R\"({\"lat\": 40.7128, \"lon\": -74.0060})\";\n   }\n   else if (url.find(\"air_pollution\") != std::string::npos)\n   {\n       // Mock response for air quality\n       return R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   }\n   else if (url.find(\"weather\") != std::string::npos)\n   {\n       // Mock response for current weather\n       return R\"({\n          \"weather\": [{\"main\": \"Clear\", \"description\": \"clear sky\"}],\n          \"main\": {\"temp\": 298.55}\n      })\";\n   }\n   return \"{}\";\n}\n```\n\nAnd verify that our tests are working by rebuilding and running our tests:  \n\n```shell\ncmake --build build \n./build/tests\n```\n\nAll tests should 
pass, including the new one for Current Weather Functionality. \n\n## Optimize tests.cpp with sections\n\nTo better organize our tests as the project grows and categorize each functionality, we can use Catch2’s `SECTION` macro. The `SECTION` macro allows you to define logically separate test scenarios within a single test case, providing a clean way to test different behaviors or conditions without requiring multiple separate test cases or multiple files. This approach keeps related tests bundled together and also improves test maintainability by allowing shared setup code to be executed repeatedly for each section.\n\nSince some of our functionality is preprocessing data to retrieve information, let’s section our tests as such:\n- preprocessing steps: \n\t- API key validation\n\t- geocoding validation\n-  API data retrieval:\n\t- air pollution retrieval \n\t- forecast retrieval\n\nHere’s what our `tests.cpp` will look like if organized by sections: \n\n```cpp\n#include \"functions.h\"\n#include \u003Ccatch2/catch_test_macros.hpp>\n#include \u003Cstring>\n\n// Mock HTTP request function that simulates API responses\nstd::string mockHttpRequest(const std::string &url)\n{\n   if (url.find(\"geo\") != std::string::npos)\n   {\n       // Mock response for geocoding\n       return R\"({\"lat\": 40.7128, \"lon\": -74.0060})\";\n   }\n   else if (url.find(\"air_pollution\") != std::string::npos)\n   {\n       // Mock response for air quality\n       return R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   }\n   else if (url.find(\"weather\") != std::string::npos)\n   {\n       // Mock response for current weather\n       return R\"({\n          \"weather\": [{\"main\": \"Clear\", \"description\": \"clear sky\"}],\n          \"main\": {\"temp\": 298.55}\n      })\";\n   }\n   return \"{}\";\n}\n\n// Overriding the actual httpRequest function with the mockHttpRequest for testing\nstd::string httpRequest(const std::string &url)\n{\n   return mockHttpRequest(url);\n}\n\n// 
Preprocessing Steps\nTEST_CASE(\"Preprocessing Steps\", \"[preprocessing]\") {\n   SECTION(\"API Key Retrieval\") {\n       // Set the API_KEY environment variable for testing\n       setenv(\"API_KEY\", \"test_key\", 1);\n       // Test if the key is retrieved correctly\n       REQUIRE_FALSE(getApiKey().empty());\n   }\n\n   SECTION(\"Geocode Functionality\") {\n       std::string apiKey = \"test_key\";\n       std::pair\u003Cdouble, double> coordinates = geocodeZipcode(\"90210\", apiKey);\n       // Check latitude\n       REQUIRE(coordinates.first == 40.7128);\n       // Check longitude \n       REQUIRE(coordinates.second == -74.0060);\n   }\n}\n\n// API Data Retrieval\nTEST_CASE(\"API Data Retrieval\", \"[data_retrieval]\") {\n   SECTION(\"Air Quality Functionality\") {\n       std::string apiKey = \"test_key\";\n       double lat = 40.7128;\n       double lon = -74.0060;\n       std::string response = fetchAirQuality(lat, lon, apiKey);\n       // Check the response\n       REQUIRE(response == R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\");\n   }\n\n   SECTION(\"Current Weather Functionality\") {\n       auto weather = getCurrentWeather(\"dummyApiKey\", 40.7128, -74.0060);\n       // Ensure main weather description is not empty\n       REQUIRE_FALSE(weather.main.empty());\n       // Validate that temperature is a reasonable value\n       REQUIRE(weather.temperature > 0);\n   }\n}\n```\n\nRebuild the code and run the tests again to verify.\n\n```shell\ncmake --build build \n./build/tests\n```\n\n## Next steps\n\nIn this post, we covered how to integrate unit testing into a `C++` project using Catch2 testing framework and GitLab CI/CD and set up basic tests for our reference air quality application project.\n\nTo explore these concepts further, you can check out the [Catch2 documentation](https://github.com/catchorg/Catch2) and [GitLab's Unit test report examples documentation](https://docs.gitlab.com/ee/ci/testing/unit_test_report_examples.html). 
\n\nFor an advanced async exercise, you could build upon this project by using GitLab Duo to implement a feature that retrieves and analyzes historical air quality data and add code quality checks into the CI/CD pipeline. Happy coding! \n","devsecops",[737,978,1081,9,716],{"slug":1204,"featured":91,"template":693},"develop-c-unit-testing-with-catch2-junit-and-gitlab-ci","content:en-us:blog:develop-c-unit-testing-with-catch2-junit-and-gitlab-ci.yml","Develop C Unit Testing With Catch2 Junit And Gitlab Ci","en-us/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci.yml","en-us/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci",{"_path":1210,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1211,"content":1217,"config":1224,"_id":1226,"_type":14,"title":1227,"_source":16,"_file":1228,"_stem":1229,"_extension":19},"/en-us/blog/developing-gitlab-duo-a-roundup-of-recent-chat-enhancements",{"title":1212,"description":1213,"ogTitle":1212,"ogDescription":1213,"noIndex":6,"ogImage":1214,"ogUrl":1215,"ogSiteName":706,"ogType":707,"canonicalUrls":1215,"schema":1216},"Developing GitLab Duo: A roundup of recent Chat enhancements","Discover the latest improvements to GitLab Duo Chat, including a new integration, prompt cancellation, and architectural upgrades.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098374/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750098374059.png","https://about.gitlab.com/blog/developing-gitlab-duo-a-roundup-of-recent-chat-enhancements","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: A roundup of recent Chat enhancements\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jannik Lehmann\"},{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-07-10\",\n      
}",{"title":1212,"description":1213,"authors":1218,"heroImage":1214,"date":1221,"body":1222,"category":10,"tags":1223},[1219,1220],"Jannik Lehmann","David O'Regan","2024-07-10","GitLab is committed to [continuously improving GitLab Duo Chat](https://gitlab.com/gitlab-org/gitlab/-/issues/430124), our AI assistant, to meet the evolving needs of our users. Here are some recent enhancements that will streamline your workflow and boost productivity.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today](https://about.gitlab.com/seventeen/)!\n\n## Vulnerability Explanation: A new integration\n\nWe've reached a significant milestone in the evolution of Chat: the integration of [GitLab Duo Vulnerability Explanation](https://about.gitlab.com/the-source/ai/understand-and-resolve-vulnerabilities-with-ai-powered-gitlab-duo/). This marks the first feature from our [GitLab Duo](https://about.gitlab.com/gitlab-duo/) platform to be integrated into Chat by a team outside of the AI group, showcasing the collaborative spirit and cross-functional capabilities at GitLab.\n\n### Key highlights of this integration:\n\n- **Swift execution:** The team moved from a spike to implementation in just three weeks, demonstrating agility and execution.\n- **Cross-team collaboration:** This integration was led by teams outside the AI group, paving the way for more diverse feature integrations in the future.\n- **Enhanced security insights:** Users will soon be able to leverage Chat to gain deeper understanding of vulnerabilities detected in their projects.\n\nThis integration represents a significant step forward in making Chat an even more powerful and versatile tool for developers, particularly in the realm of security.\n\n## Enhanced context awareness\n\nWe've made significant strides in improving Chat's context awareness, making it more intelligent and helpful in various scenarios.\n\n### Always-available knowledge\n\nGitLab Duo Chat 
always has access to:\n- GitLab documentation\n- General programming and coding knowledge\n\nIt's crucial to understand that Chat does not have unrestricted access to your entire GitLab instance or codebase. It only processes the specific information you provide in your query or what's immediately relevant to your current view in the GitLab UI or IDE.\n\nWe're continuously working to expand Chat's contextual awareness to include more types of content, always with a focus on user privacy and data security. This gradual expansion aims to make Chat an even more powerful assistant for your development workflow while maintaining appropriate data access boundaries.\n\n### Expanded contextual knowledge\n\nGitLab Duo Chat now has [a better understanding of the context you're working in](https://docs.gitlab.com/ee/user/gitlab_duo_chat/#the-context-chat-is-aware-of), both in the GitLab UI and IDEs. Here's a breakdown of what Chat is aware of.\n\nIn the GitLab UI:\n- **Epics** - Chat understands when you refer to \"this epic\" or use the epic's URL.\n- **Issues** - Similar to epics, Chat recognizes \"this issue\" or the issue's URL.\n- **Code files** - When viewing a single file, Chat can interpret requests about \"this code\" or \"this file\".\n\nIn IDEs:\n- **Selected code** - Chat can analyze code you've selected when you ask about \"this code\" or \"this file\".\n- **Epics and issues** -  Chat can understand context when you provide the URL.\n\nAdditionally, when using slash commands like `/explain`, `/refactor`, or `/tests` in IDEs, Chat has access to the selected code.\n\n![Screenshot of GitLab Duo Chat window](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098382/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098382107.png)\n\n### Chat history and caching\nGitLab Duo Chat retains the last 50 messages in the chat history. This history expires three days after last use. 
Closing your browser or IDE will not permanently delete your chat history within this timeframe, but it's important to note that long-term persistence of chat data is not currently supported.\n\n## Prompt cancellation: Stop responses on demand\n\nOne of the most anticipated features is now available: [prompt cancellation](https://gitlab.com/groups/gitlab-org/-/epics/13662). Users can now cancel ongoing prompts in Chat on GitLab.com, giving you [more control over your interactions](https://gitlab.com/gitlab-org/gitlab/-/issues/458397).\n- Available now: This feature has been rolled out on GitLab.com.\n- Coming soon: This feature will be available for self-managed instances in our next release. GitLab Dedicated users will receive it in the monthly upgrade.\n- Work in progress: [Integration for editor extensions](https://docs.gitlab.com/ee/editor_extensions/) - [follow along in the issue](https://gitlab.com/gitlab-org/editor-extensions/gitlab-jetbrains-plugin/-/issues/335).\n\nThis enhancement allows you to stop a response if you've sent a prompt too early or had a change of thought while waiting. It's a small but powerful feature that can save you time and frustration.\n\nTo cancel a prompt in GitLab Duo Chat, follow these steps:\n1. Open GitLab Duo Chat on GitLab.com.\n2. Start typing a prompt or question such as `What is this issue about?`.\n\n![Screen showing the start of how to cancel prompts in GitLab Duo Chat](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098382/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098382108.png)\n\n3. After sending the prompt, if you wish to cancel the response, look for the new \"Cancel\" button that appears while Chat is generating a response.\n\n![Screenshot of Chat with Cancel button](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098382/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098382110.png)\n\n4. 
Click the \"Cancel\" button to immediately stop the response generation.\n\n![Screenshot showing response stopped](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098382/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098382112.png)\n\n## Architectural improvements\n\nBehind the scenes, we've been working on architectural improvements to make GitLab Duo Chat more robust and efficient:\n\n- Moving to Language Server Protocol ([LSP](https://gitlab.com/gitlab-org/editor-extensions/gitlab-lsp)): This effort improves the integration of Chat with various development environments. \n- GitLab Language Server is an experimental TypeScript project that provides a common interface for IDE extensions to build GitLab functionality. It currently supports GitLab Duo Code Suggestions and upcoming will support GitLab Duo Chat.\n\nWhile this change primarily affects the underlying architecture, users may notice:\n- Improved responsiveness and performance when using Chat across different IDEs and editors.\n- More consistent behavior of Chat features across various development environments.\n- Enhanced ability to add new features and improvements in the future.\n\nCheck out our introduction to how GitLab Language Server powers Code Suggestions in this video walkthrough:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/VQlWz6GZhrs?si=_G5mOyYqEGAmnRv4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## What's next?\n\nWe're continuously improving GitLab Duo Chat. Here are some highlights:\n\n- We're in the process of migrating our AI features to [Claude 3.5 Sonnet](https://gitlab.com/gitlab-org/gitlab/-/issues/468334). 
This upgrade will bring improved performance and capabilities to Chat and other AI-powered features.\n- We're actively working on [enabling Chat to work with custom, self-hosted models](https://gitlab.com/groups/gitlab-org/-/epics/13760). This will allow organizations to use their own AI models with Chat, providing more control over the AI's knowledgebase and potentially improving performance for domain-specific tasks.\n- We're currently finishing the [synchronization of messages across all clients](https://gitlab.com/gitlab-org/gitlab/-/issues/418760), including WebUI, to ensure seamless communication and keep all your clients in sync, enhancing your collaboration experience.\n- We’re [migrating the “Summarize Comments” feature to Chat](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/156650). You'll be able to summarize multiple comments on an issue directly within Chat, helping you quickly understand the main points and key takeaways from discussions, thereby improving your collaborative experience.\n\nWe look forward to [hearing your feedback on these enhancements](https://gitlab.com/gitlab-org/gitlab/-/issues/430124). 
Stay tuned for more updates as we continue to evolve GitLab Duo Chat.\n\n> Find out even more about [how we are developing GitLab Duo](https://about.gitlab.com/blog/developing-gitlab-duo-series/) with our ongoing series.\n",[9,717,738],{"slug":1225,"featured":91,"template":693},"developing-gitlab-duo-a-roundup-of-recent-chat-enhancements","content:en-us:blog:developing-gitlab-duo-a-roundup-of-recent-chat-enhancements.yml","Developing Gitlab Duo A Roundup Of Recent Chat Enhancements","en-us/blog/developing-gitlab-duo-a-roundup-of-recent-chat-enhancements.yml","en-us/blog/developing-gitlab-duo-a-roundup-of-recent-chat-enhancements",{"_path":1231,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1232,"content":1238,"config":1244,"_id":1246,"_type":14,"title":1247,"_source":16,"_file":1248,"_stem":1249,"_extension":19},"/en-us/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai",{"title":1233,"description":1234,"ogTitle":1233,"ogDescription":1234,"noIndex":6,"ogImage":1235,"ogUrl":1236,"ogSiteName":706,"ogType":707,"canonicalUrls":1236,"schema":1237},"Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI","Our blog series continues spotlighting a new feature that provides detailed metrics, such as the Code Suggestions Usage Rate, to help understand the effectiveness of AI investments.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098611/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750098611370.png","https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2024-05-15\",\n      
}",{"title":1233,"description":1234,"authors":1239,"heroImage":1235,"date":1241,"body":1242,"category":10,"tags":1243},[1240],"Haim Snir","2024-05-15","***Generative AI marks a monumental shift in the software development industry, making it easier to develop, secure, and operate software. Our new blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers.***\n\nAs organizations adopt [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI features to power DevSecOps workflows, business and engineering leaders need real-time visibility into the technology's ROI. Granular usage data, performance improvements, the trade-off between speed, security, and quality, and other [productivity metrics](https://about.gitlab.com/blog/measuring-ai-effectiveness-beyond-developer-productivity-metrics/) are essential to evaluate the effectiveness of AI in software development. 
That's why we created the AI Impact analytics dashboard for GitLab Duo, available in GitLab 17.0, as a new way to measure the ROI of AI.\n\n> [Take an interactive tour of the AI Impact analytics dashboard](https://gitlab.navattic.com/ai-impact).\n\n## Understanding the ROI of GitLab Duo AI-powered capabilities\n\nTo properly evaluate AI's impact on the software development lifecycle, organizations have told us they want to:\n- visualize which metrics improved as a result of investments in AI\n- compare the performance of teams that are using AI against teams that are not using AI\n- track the progress of AI adoption\n- automate insights extraction from a large volume of performance data\n\nAI Impact analytics dashboard features these capabilities and more with customizable visualization, which enables teams to:\n- **Monitor AI adoption:** Observing AI adoption rates enables organizations to evaluate organizational strategies to maximize the ROI on their technology investments. \n- **Track performance improvements:** By tracking performance metrics and observing changes after the adoption of AI, leaders can quickly assess the benefits and business value of AI features.\n\n## What is the AI Impact analytics dashboard?\n\nIn this first release of the AI Impact analytics dashboard, we focus on providing insights and metrics about GitLab Duo Code Suggestions adoption, including:\n\n- **Detailed usage metrics:** Discover the ratio of monthly Code Suggestions usage compared to the total number of unique code contributors to know how deeply Code Suggestions is adopted within your teams.\n- **Correlation observations:** Examine how trends in AI usage within a project or across a group influence other crucial productivity metrics, displayed for the current month and the trailing six months. \n    - For this correlation analysis we added a new metric \"Code Suggestions Usage Rate\" as the Independent Variable (the cause). 
The monthly Code Suggestions Usage Rate is calculated as the number of monthly unique Code Suggestions users divided by total monthly unique [contributors](https://docs.gitlab.com/ee/user/profile/contributions_calendar.html#user-contribution-events). GitLab considers the total monthly unique code contributors, which means only users with pushed events are included in the calculation.\n    - As Dependent Variables (the effect), we added these [performance metrics](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html#dashboard-metrics-and-drill-down-reports): Cycle Time, Lead Time and Deployment Frequency. And as [Quality and Security Metrics](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html#dashboard-metrics-and-drill-down-reports), we added Change Failure Rate and Critical Vulnerabilities. \n- **Comparison view:**  Understand the difference in the performance of teams that are and are not using AI, and manage the trade-off between speed, quality, and security exposure.\n\n![Comparison of AI usage and SDLC performance](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098621/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098620998.png)\n\n## What’s next for the AI Impact analytics dashboard?\n\nLooking ahead, we have exciting plans to expand the capabilities of the AI Impact analytics dashboard. Here are some of the highlights:\n\n1. New tile visualizations such as \"GitLab Duo Seats: Assigned and Used,\" \"Code Suggestions: Acceptance Rate %,\" and \"GitLab Duo Chat: Unique Users\"  to gain a deeper insight into usage patterns for GitLab Duo.\n\n![AI Impact - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098621/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-07-17_at_12.50.31_aHR0cHM6_1750098620999.png)\n\n2. 
New comparison bar chart to help users observe how changes in one metric correlate with changes in others:\n\n![AI Impact comparison bar chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098621/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098621000.png)\n\n3. AI statistics in the [Contribution analytics report](https://docs.gitlab.com/ee/user/group/contribution_analytics/index.html) to understand how users interact with AI features. See which users are leveraging AI features and whether their performance has changed over time:\n\n![Contribution analytics report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098621/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098621001.png)\n\n## Get started today\n\nWe're excited about the potential of the AI Impact analytics dashboard to not only demonstrate the real-world business outcomes of AI but also to drive more informed decisions regarding future AI as optimization for the DevSecOps lifecycle. 
For more information about what is coming next and to share feedback or questions, [please visit our AI Impact analytics dashboard epic](https://gitlab.com/groups/gitlab-org/-/epics/12978).\n\nStart your [free trial of GitLab Duo and the AI Impact analytics dashboard today](https://about.gitlab.com/gitlab-duo/#free-trial).\n\n## Read more of the \"Developing GitLab Duo\" series\n\n- [Developing GitLab Duo: How we validate and test AI models at scale](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale/)\n- [Developing GitLab Duo: How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features/)\n- [Developing GitLab Duo: Secure and thoroughly test AI-generated code](https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code/)\n- [Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/)\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._",[9,1083,717],{"slug":1245,"featured":91,"template":693},"developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai","content:en-us:blog:developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai.yml","Developing Gitlab Duo Ai Impact Analytics Dashboard Measures The Roi Of Ai","en-us/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai.yml","en-us/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai",{"_path":1251,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1252,"content":1258,"config":1266,"_id":1268,"_type":14,"title":1269,"_source":16,"_file":1270,"_stem":1271,"_extension":19},"/en-us/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd",{"title":1253,"description":1254,"ogTitle":1253,"ogDescription":1254,"noIndex":6,"ogImage":1255,"ogUrl":1256,"ogSiteName":706,"ogType":707,"canonicalUrls":1256,"schema":1257},"GitLab Duo: AI-powered CI/CD pipeline root cause analysis","Discover how we've infused Root Cause Analysis with AI to help remedy broken CI/CD pipelines, including example scenarios and take-away exercises.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097321/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750097321081.png","https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rutvik Shah\"},{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2024-06-06\",\n      
}",{"title":1259,"description":1254,"authors":1260,"heroImage":1255,"date":1263,"body":1264,"category":10,"tags":1265},"Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines",[1261,1262],"Rutvik Shah","Michael Friedrich","2024-06-06","___Generative AI marks a monumental shift in the software development industry, making it easier to develop, secure, and operate software. Our new blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers.___\n\nHave you ever encountered a broken [CI/CD](https://about.gitlab.com/topics/ci-cd/benefits-continuous-integration/) pipeline and had to halt your DevSecOps workflow, or even delay software deployment, as you try to figure out the root cause? Traditionally, when something goes wrong in the process of creating software, developers have to troubleshoot, dig through log files, and often do a lot of trial and error development. [GitLab Duo Root Cause Analysis](https://about.gitlab.com/gitlab-duo/), part of our suite of AI-powered features, removes the guesswork by determining the root cause for a failed CI/CD pipeline. In this article, you'll learn what Root Cause Analysis is and how to apply the AI-powered GitLab Duo feature to your DevSecOps workflow.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n### What is Root Cause Analysis?\n\nGitLab Duo Root Cause Analysis is an AI-powered feature that assists you in determining a root cause and suggesting a fix for a CI/CD job log failure by analyzing the logs.\n\nWhile Root Cause Analysis is often seen in product incident management, its workflows and debugging practices can be found in any DevSecOps workflow. 
Ops teams, administrators, and platform engineers are challenged by infrastructure-as-code (IaC) deployment errors, Kubernetes and GitOps problems, and long stack traces while investigating pipeline failures.\n\nGitLab Duo Root Cause Analysis keeps everyone in the same interface and uses AI-powered help to summarize, analyze, and propose fixes so that organizations can release secure software faster.\n\nA pipeline can encounter failures for a variety of reasons, including syntax errors in the code, missing dependencies that the pipeline relies on, test failures during the build process, Kubernetes and IaC deployment timeouts, and numerous other potential issues. When such failures occur, it becomes the responsibility of everyone to meticulously review the logs generated by the pipeline. This job log review process involves scrutinizing the detailed output to identify the specific errors and pinpoint the root cause of the pipeline failure. For example, the following pipeline has multiple job failures that need to be investigated and fixed.\n\n![Image depicting multiple job failures](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097332/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097332601.png)\n\nThe duration required to fix these failures can vary significantly and is largely influenced by several factors such as:\n- the developer's familiarity with the project\n- their level of experience in dealing with similar issues\n- their overall skill level in troubleshooting and problem-solving within the context of the pipeline.\n\nManual analysis can be exceedingly challenging and time-consuming, given that log data consists of application logs and system messages with a wide variety of potential sources of failures. A typical pipeline fix can consist of several iterations and context switching. The complexity and the unstructured nature of the logs is a perfect fit for speeding up the task using generative AI.  
Using AI can reduce the time to identify and fix a pipeline error significantly and also lower the barrier of expertise that would be needed to fix a pipeline such as the above.\n\nWatch GitLab Duo Root Cause Analysis in action:\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n \u003Ciframe src=\"https://www.youtube.com/embed/sTpSLwX5DIs?si=J6-0Bf6PtYjrHX1K\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n### How does Root Cause Analysis work?\n\n[Root Cause Analysis](https://docs.gitlab.com/ee/user/ai_experiments.html#root-cause-analysis) works by forwarding a portion of the CI/CD job log to the [GitLab AI Gateway](https://docs.gitlab.com/ee/architecture/blueprints/ai_gateway/). GitLab ensures that the portion sent will fit inside the large language model (LLM) token limits alongside a prompt that has been pre-crafted to provide insights into why the job might have failed. The prompt also instructs the LLM to provide an example of how a user might fix a broken job.\n\nHere are two example scenarios where Root Cause Analysis can provide assistance.\n\n#### 1. Analyze a Python dependency error\n\nA Python application can import package modules with functionality that is not provided in the standard library. The project [Challenge - Root Cause Analysis - Python Config](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/root-cause-analysis/challenge-root-cause-analysis-python-config) implements an application that parses configuration and initializes an SQLite database, which both work well without any dependencies. It uses best practices in CI/CD with a Python environment and caching. The latest feature implementation adds a Redis caching client, and now the CI/CD build is failing for some reason. 
\n\nBy using Root Cause Analysis, you can immediately learn that the `ModuleNotFoundError` text means that the module is actually not installed in the Python environment. GitLab Duo also suggests an example fix: Installing the Redis module through the PIP package manager. \n\n![Image depicting 'modulenotfounderror' and GL Duo suggested resolution](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097332/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097332602.png)\n\nThe failing pipeline can be viewed [here](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/root-cause-analysis/challenge-root-cause-analysis-python-config/-/jobs/6992716398). \n\nThe Root Cause Analysis prompt provides a summary of the problem, which seems to be a problem with a missing `redis` module. Let's try to fix the problem by installing the `redis` module. You can either call `pip install redis` in the CI/CD job `script` section, or use a more sophisticated approach with the `requirements.txt` file. The latter is useful for a single source of truth for dependencies installed in the development environment and CI/CD pipelines.\n\n```yaml\ntest:\n  extends: [.python-req]\n  stage: test \n  before_script:\n    # [🦊] hint: Root cause analysis.\n    # Solution 1: Install redis using pip\n    - pip install redis\n    # Solution 2: Add redis to requirements.txt, use pip\n    - pip install -r requirements.txt \n\n  script:\n    - python src/main.py\n```\n\nAfter fixing the missing Python dependency, the CI/CD job fails again. Use Root Cause Analysis again to learn that no Redis service is running in the job. 
Switch to using GitLab Duo Chat and use the prompt `How to start a Redis service in CI/CD` to learn how to configure the `services` attribute in the CI/CD job.\n\n![Depicts the prompt for how to start a Redis service](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097333/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097332602.png)\n\nModify the `.gitlab-ci.yml` with the `test` job, and specify the `redis` service.\n\n```yaml\ntest:\n  extends: [.python-req]\n  stage: test \n  before_script:\n    # [🦊] hint: Root cause analysis.\n    # Solution 1: Install redis using pip\n    - pip install redis\n    # Solution 2: Add redis to requirements.txt, use pip\n    - pip install -r requirements.txt \n\n  script:\n    - python src/main.py\n\n  # Solution 3 - Running Redis\n  services:\n    - redis\n```\n\nRunning the Redis server allows you to successfully execute the Python application, and print its output into the CI/CD job log.\n\n![output of Python application](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097332/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097332603.png)\n\nThe solution is provided in the [solution/ directory](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/root-cause-analysis/challenge-root-cause-analysis-python-config/-/tree/main/solution?ref_type=heads).\n\n**Tip:** You can also ask [GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) to follow up on potential future problems:\n\n```markdown\nHow to lint Python code? Which tools are recommended for CI/CD.\nHow to pin a package version in Python requirements file?\t\nWhat are possible ways that this exception stacktrace is triggered in the future?\nAre there ways to prevent the application from failing?\n``` \n\nThe next example is more advanced and includes multiple failures. \n\n#### 2. 
Analyze missing Go runtime\n\nCI/CD jobs can be executed in containers, spawned from the contributed `image` attribute. If the container does not provide a programming language runtime, the executed `script` sections referencing the `go` binary fail. For example, the error message `/bin/sh: eval: line 149: go: not found` needs to be understood and fixed. \n\nIf the `go` command is not found in the container's runtime context, this can have multiple reasons:\n\n1. The job uses a minimal container image, for example `alpine`, and the Go language runtime was not installed.\n1. The job uses the wrong default container image, for example, specified on top of the CI/CD configuration, or using the `default` keyword.\n1. The job does not use a container image but the shell executor. The host operating system does not have the Go language runtime installed, or it is otherwise broken/not configured.\n\nThe project [Challenge - Root Cause Analysis - Go GitLab Release Fetcher](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/root-cause-analysis/challenge-root-cause-analysis-go-gitlab-release-fetcher) provides an exercise challenge to analyze and fix CI/CD problems with a GitLab release fetcher application, written in Go. The `build` and `docker-build` CI/CD jobs are failing. Fixing the problem requires different scopes: Understanding why the Go runtime is not installed, and learning about the `Dockerfile` syntax. \n\n![Screenshot showing Change Docker Label job failed](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097332/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097332603.png)\n\nThe [`solution/` directory](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/root-cause-analysis/challenge-root-cause-analysis-go-gitlab-release-fetcher) provides two possible solutions after Root Cause Analysis. 
\n\n## Practice using Root Cause Analysis\n\nHere are some scenarios to use to practice using Root Cause Analysis.\n\n- When you are running into Kubernetes deployment errors or timeouts. \n\n- With OpenTofu or Terraform IaC pipelines failing to provision your cloud resources.\n\n- When the Ansible playbook fails with a cryptic permission error in CI/CD.\n\n- When the Java stack trace is 10 pages long.\n\n- With a shell script highlighting an execution error.\n\n- When a Perl script fails in a single line, which is the only line in the script.\n\n- When the CI/CD job times out and it is unclear which section would cause this.\n\n- When a network connection timeout is reached, and you think it cannot be DNS.\n\n### What is next for GitLab Duo Root Cause Analysis?\n\nWe want to help our users to get their pipelines back to passing in fewer iterations. The Root Cause Analysis will open and show the response in GitLab Duo Chat, our AI assistant. Users can build on the recommendation to generate a more precise fix by asking specific questions (e.g., programming language-specific fixes) or asking for alternative fixes based on the root cause.\n\nFor example, here is the Root Cause Analysis for a failing job:\n\n![Root Cause Analysis response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097332/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097332603.png)\n\nUsers can ask follow-up questions that build upon the AI-generated response.\n\n- I do not want to create my own Docker image. Please explain different ways to fix the problem.\n\n- I don't have access to the Docker image creation. It seems that the Go binary is missing. Are there alternative images you can suggest?\n\nGitLab also will be running quality benchmarks for the generated responses and shipping usability improvements.\n\nPlease see our [Root Cause Analysis GA epic](https://gitlab.com/groups/gitlab-org/-/epics/13080) for more details. 
We would also love your feedback on the feature. Please leave a comment on our [Root Cause Analysis feedback issue](https://gitlab.com/groups/gitlab-org/-/epics/13872).\n\n## Get started with Root Cause Analysis\n\nPlease see our [documentation](https://docs.gitlab.com/ee/user/ai_experiments.html#root-cause-analysis) on how to enable the feature available to our GitLab Ultimate customers. Also, GitLab Duo Root Cause Analysis will soon be coming to GitLab self-managed and GitLab Dedicated.\n\nNot a GitLab Ultimate customer? Start [a free trial](https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/blog&glm_content=default-saas-trial) today. \n\n## Read more of our \"Developing GitLab Duo\" series\n\n- [Developing GitLab Duo: How we validate and test AI models at scale](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale/)\n\n- [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n\n- [Developing GitLab Duo: How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features/)\n\n- [Developing GitLab Duo: Secure and thoroughly test AI-generated code](https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code/)",[9,737,716,496,717],{"slug":1267,"featured":91,"template":693},"developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd","content:en-us:blog:developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd.yml","Developing Gitlab Duo Blending Ai And Root Cause Analysis To Fix Ci 
Cd","en-us/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd.yml","en-us/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd",{"_path":1273,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1274,"content":1280,"config":1285,"_id":1287,"_type":14,"title":1288,"_source":16,"_file":1289,"_stem":1290,"_extension":19},"/en-us/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features",{"title":1275,"description":1276,"ogTitle":1275,"ogDescription":1276,"noIndex":6,"ogImage":1277,"ogUrl":1278,"ogSiteName":706,"ogType":707,"canonicalUrls":1278,"schema":1279},"Developing GitLab Duo: How we are dogfooding our AI features","As part of our blog series, we share real-world examples of how we integrate AI throughout our software development lifecycle and how we use metrics to gauge their success.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098360/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750098360821.png","https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: How we are dogfooding our AI features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-05-20\"\n      }",{"title":1275,"description":1276,"authors":1281,"heroImage":1277,"date":1282,"body":1283,"category":10,"tags":1284},[1220],"2024-05-20","***Generative AI marks a monumental shift in the software development industry, making it easier to develop, secure, and operate software. Our new blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. 
Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers.***\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI-powered features, has transformed our internal engineering workflows, driving efficiency gains across our development process. As strong proponents of dogfooding and transparency, we wanted to showcase how our teams leverage AI, including standouts like GitLab Duo Code Suggestions and GitLab Duo Chat, daily to streamline development processes, reduce manual effort, and enhance productivity. You'll learn about the benefits we've experienced for highly technical teams like engineering to less technical teams such as technical writing and product management.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n## Real-world use cases\n\nOur teams have integrated [GitLab Duo's many features](https://about.gitlab.com/gitlab-duo/#features) into their daily routines. Here are some examples of how GitLab Duo is helping them carry out everyday activities.\n\n### Summarization and documentation\n- **Streamline the code review process:** Staff Backend Developer [Gosia Ksionek](https://about.gitlab.com/company/team/#mksionek) showcases the practical benefits of AI in her workflow by using GitLab Duo to streamline the code review process. She effectively utilizes GitLab Duo to [summarize merge requests](https://youtu.be/3SIhe8dgFEc), making it easier and faster to review code changes. In addition to summarizing merge requests, Gosia also leverages GitLab Duo to [answer coding questions](https://www.youtube.com/watch?v=6n0I53XsjTc) and [explain complex code snippets](https://www.youtube.com/watch?v=3m2YRxa1SCY). This enhances her productivity and helps her better understand and manage intricate codebases. 
Through these demonstrations, Gosia highlights how GitLab Duo can significantly improve efficiency and clarity in the development process, making it an invaluable tool for developers.\n\n\u003Ccenter>\n\nWatch Gosia use GitLab Duo Merge Request Summary:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/3SIhe8dgFEc?si=Q8JG3Ix3K_THhbpv\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWatch Gosia use GitLab Duo to answer coding questions: \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/6n0I53XsjTc?si=LA9VBHrgXpfJImSL\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWatch Gosia use GitLab Duo to explain complex code snippets:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/3m2YRxa1SCY?si=oms3szKwZoz-4yeq\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003C/center>\n\n- **Condense comment threads:** [Bartek Marnane](https://about.gitlab.com/company/team/#bmarnane), Vice President of Expansion Software Development, uses GitLab Duo to condense lengthy comment threads into concise summaries, ensuring all relevant details are captured when updating issue descriptions.\n\n- **Create new documentation:** [Taylor McCaslin](https://about.gitlab.com/company/team/#tmccaslin), Group Manager, Product - Data Science Section, leveraged GitLab Duo to [create new documentation for GitLab Duo itself](https://docs.gitlab.com/ee/user/ai_features.html), exemplifying a meta use case that enhances clarity and consistency and greatly reduces the time to document new features.\n\n- **Craft release notes:** [Amanda Rueda](https://about.gitlab.com/company/team/#amandarueda), Senior Product Manager for Product 
Planning, uses GitLab Duo to [craft brief, impactful summaries for release notes](https://gitlab.com/groups/gitlab-org/-/epics/10267), highlighting changes and their value to users. By using well-crafted prompts like below, Amanda supercharges her workflow and ensures that each release note is clear, concise, and user-focused, enhancing the overall communication and user experience:\u003Cbr>\u003Cbr>\n*“Please create a two sentence summary of this change, which can be used for our release notes. The tone should be conversational and should be in second person. The summary should include a description of the problem or change and be tied to the value we are creating for you, the user.”*\n\u003Cbr>\u003Cbr>\n    - Here are some examples of release notes co-created with GitLab Duo:\n      - [Expanded options for sorting your Roadmap](https://gitlab.com/gitlab-org/gitlab/-/issues/460492)\n      - [Issue Board Clarity now with Milestone & Iteration](https://gitlab.com/gitlab-org/gitlab/-/issues/25758)\n      - [Design Management Features Extended to Product Teams](https://gitlab.com/gitlab-org/gitlab/-/issues/438829)\n\n- **Optimize docs site navigation:** [Suzanne Selhorn](https://about.gitlab.com/company/team/#sselhorn), Staff Technical Writer, tapped GitLab Duo to [optimize the left navigation of documentation](https://docs.gitlab.com/ee/user/get_started/get_started_projects.html) by providing a workflow-based order of pages. Suzanne provided a list of features to GitLab Duo, which generated the optimal order, updating the left navigation to match. 
GitLab Duo also drafted the [Getting Started](https://docs.gitlab.com/ee/user/get_started/get_started_planning_work.html) documentation much faster than were she to use traditional, manual approaches.\n\n### Goal setting and team alignment\n- **Draft and refine OKRs:** [François Rosé](https://about.gitlab.com/company/team/#francoisrose), Engineering manager, Create:Code Review Backend, finds [GitLab Duo Chat](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available/) invaluable for drafting and refining OKRs. By articulating objectives more clearly and effectively, François enhances goal setting and team alignment. Using Chat, François ensures that each OKR is precise, actionable, and aligned with the team's goals, thereby improving overall team performance and cohesion. Here is an example prompt he uses:\u003Cbr>\u003Cbr>\n\n    *\"Here is an OKR I am thinking of creating:*\n\n    *Objective: Retrospect on retrospectives, to foster a thriving team*\n\n    *KR: Measure retrospective satisfaction from 100% of team members*\n\n    *KR: Identify 3 improvements to the async retrospectives*\n\n    *KR: Implement 1 improvement*\n\n    *Please provide direct feedback on how to improve the formulation of this objective and these key results.\"*\n\u003Cbr>\u003Cbr>\n\n- **Streamlined hiring and recruitment processes:** Chat helped [Denys Mishunov](https://about.gitlab.com/company/team/#dmishunov), Staff Frontend Engineer, formulate a clear and concise text for updating the email template for technical interview candidates. The team collaborated on refining the communication to ensure candidates receive all necessary information using a merge request. 
This example showcased the practical application of AI tools in enhancing communication processes within the hiring workflow.\n\n### Incident response and configuration\n- **Summarize production incidents:** [Steve Xuereb](https://about.gitlab.com/company/team/#sxuereb), Staff Site Reliability Engineer, employs GitLab Duo to summarize production incidents and create detailed incident reviews, streamlining the documentation process.\n\n- **Create boilerplate `.gitlab-ci.yml` files:**  Steve also uses Chat to create boilerplate `.gitlab-ci.yml` files, which significantly sped up his workflow. [Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) serves as a valuable partner for suggesting ideas. Additionally, [Code Explanation](https://docs.gitlab.com/ee/user/ai_features.html#code-explanation) provides detailed answers that are helpful during incidents, enhancing his productivity and understanding of the codebase.\n\n### Code generation and testing\n- **Full-stack development:** [Peter Hegman](https://about.gitlab.com/company/team/#peterhegman), Senior Frontend Engineer, has been using [Code Suggestions for his JavaScript and Ruby development](https://gitlab.com/gitlab-org/gitlab/-/issues/435783#note_1731321963). This highlights that Code Suggestions has become a powerful tool for developers moving across a full technical stack. \n\n- **Generate Python scripts:** Denys conducted [an experiment using GitLab Duo for a non-GitLab task](https://gitlab.com/gitlab-org/ai-powered/ai-framework/ai-experimentation). 
This example highlights the flexibility and utility of our AI tools beyond typical software development tasks.\n\n\u003Ccenter>\nWatch how Denys uses GitLab Duo to generate Python scripts to fetch content data and store it locally:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/30ZTtk4K5yU?si=p5ZcFLg6dTZL5gFE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003C/center>\n\n### Research and support\n- **Generate test source code:**  [Michael Friedrich](https://about.gitlab.com/company/team/#dnsmichi), Senior Developer Advocate, uses GitLab Duo to generate test source code for CI/CD components. This approach has been shared in various talks and presentations, such as the recent Open Source @ Siemens event ([public slides](https://go.gitlab.com/duA2Fc)). Using GitLab Duo in this manner helps ensure that the code is consistent, well-documented, and aligned with our best practices. Check out his [Rust example](https://gitlab.com/components/rust#contributing).\n\n![Rust example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098367/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098367547.png)\n\n- **Streamline research tasks:** Our team members consistently turn to Chat when they have questions about GitLab features, streamlining their research and support tasks. Michael shared, \"When I have a question about GitLab features, I default to using Chat instead of opening 100 browser tabs. This workflow helps me assist users on our community forum efficiently. 
For instance, I recently [helped a user with SSH deployment](https://forum.gitlab.com/t/how-to-make-ssh-deployment-more-clear-in-gitlab/102051/4?u=dnsmichi) using this method.\" Using Chat not only saves time but also provides quick, accurate information, enhancing the support we offer to our community.\n\n### Feature testing\n- **Test new features:** Our engineers use GitLab Duo to test new features like [Markdown support in Code Suggestions](https://gitlab.com/gitlab-org/gitlab/-/issues/443365). One of our team members noted, \"I need to test Markdown support in Code Suggestions for writing blog posts and GitLab docs in VS Code. I saw it was merged for 17.0.\" By testing these features internally, we ensure they meet our quality standards before release.\n\n### Understanding external codebases\n- **Explain external projects:** GitLab Duo's `/explain` feature is particularly useful for understanding external projects imported into GitLab. This capability was highlighted in a recent livestream Michael Friedrich did with open source expert Eddie Jaoude. Michael let us know, \"I use `/explain` on external projects to understand the source code. I pitched this idea for learning about open source projects, dependencies, etc. 
during the livestream.\" This feature is invaluable for developers who need to quickly grasp the functionality and dependencies of unfamiliar codebases, significantly improving their efficiency and understanding.\n\n\u003Ccenter>\nWatch Michael demo `/explain` during a livestream with Eddie Jaoude:\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/L2Mx8hOhkEE?si=R7W3v4EDqeJCaPOw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003C/center>\n\n## GitLab Duo's benefits\n\nThe integration of GitLab Duo has brought about numerous positive impacts, significantly enhancing our engineering and product development workflows:\n\n- Many tasks that previously required manual intervention are now automated, freeing up valuable time for our engineers. For example, summarizing long threads and creating boilerplate code are now more efficient, allowing our team to focus on more complex issues.\n- The time taken to document and summarize issues has decreased, allowing for quicker information dissemination and decision-making.\n- With AI-assisted code suggestions and explanations, our teams produce higher quality code with fewer errors and faster debugging processes. The integration of GitLab Duo into incident reviews and coding assistance has led to more efficient and effective code reviews.\n- Administrative tasks, such as drafting OKRs and creating release notes, have been streamlined. \n\nGitLab Duo has helped to not only improve our efficiency but also to enhance the quality and speed of our development processes, illustrating the transformative power of AI in software development.\n\n## What's next?\n\nWe are committed to further integrating AI into our workflows and continuously improving GitLab Duo features based on internal feedback and evolving needs. 
The ongoing collection of use cases and metrics with the [AI Impact analytics dashboard](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/) will guide enhancements and ensure that GitLab Duo remains at the forefront of AI-driven development tools.\n\n![Dogfooding Duo - AI analytics dashboard](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098367/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098367547.png)\n\n> [Get started using GitLab Duo today with our free trial.](https://about.gitlab.com/gitlab-duo/#free-trial)\n\n## Read more \"Developing GitLab Duo\"\n\n- [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n- [Developing GitLab Duo: How we validate and test AI models at scale](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale/)\n- [Developing GitLab Duo: Secure and thoroughly test AI-generated code](https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code/)\n- [Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/)",[9,834,717,496,759],{"slug":1286,"featured":91,"template":693},"developing-gitlab-duo-how-we-are-dogfooding-our-ai-features","content:en-us:blog:developing-gitlab-duo-how-we-are-dogfooding-our-ai-features.yml","Developing Gitlab Duo How We Are Dogfooding Our Ai 
Features","en-us/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features.yml","en-us/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features",{"_path":1292,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1293,"content":1298,"config":1304,"_id":1306,"_type":14,"title":1307,"_source":16,"_file":1308,"_stem":1309,"_extension":19},"/en-us/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale",{"title":1294,"description":1295,"ogTitle":1294,"ogDescription":1295,"noIndex":6,"ogImage":704,"ogUrl":1296,"ogSiteName":706,"ogType":707,"canonicalUrls":1296,"schema":1297},"Developing GitLab Duo: How we validate and test AI models at scale","Our blog series debuts with a behind-the-scenes look at how we evaluate LLMs, match them to use cases, and fine-tune them to produce better responses for users.","https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: How we validate and test AI models at scale\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Susie Bitters\"}],\n        \"datePublished\": \"2024-05-09\"\n      }",{"title":1294,"description":1295,"authors":1299,"heroImage":704,"date":1301,"body":1302,"category":10,"tags":1303},[1300],"Susie Bitters","2024-05-09","**_Generative AI marks a monumental shift in the software development industry, making it easier to develop, secure, and operate software. Our new blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers._**\n\nGitLab values the trust our customers place in us. 
Part of maintaining that trust is transparency in how we build, evaluate, and ensure the high-quality functionality of our [GitLab Duo](https://about.gitlab.com/gitlab-duo/) AI features. GitLab Duo features are powered by a diverse set of models, which allows us to support a broad set of use cases and gives our customers flexibility. GitLab is not tied to a single model provider by design. We currently use foundation models from [Google](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/main/ai_gateway/models/vertex_text.py?ref_type=heads#L86) and [Anthropic](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/blob/main/ai_gateway/models/anthropic.py?ref_type=heads#L62). However, we continuously assess what models are the right matches for GitLab Duo’s use cases. In this article, we give you an inside look at our AI model validation process.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n## Understanding LLMs\n\nLarge language models (LLMs) are generative AI models that power many AI features across the platform. Trained on vast datasets, LLMs predict the next word in a sequence based on preceding context. Given an input prompt, they generate human-like text by sampling from the probability distribution of words conditioned on the prompt.\n\nLLMs enable intelligent code suggestions, conversational chatbots, code explanations, vulnerability analysis, and more. Their ability to produce diverse outputs for a given prompt makes standardized quality evaluation challenging. LLMs can be optimized for different characteristics, which is why there are so many AI models actively being developed.\n\n## Testing at scale\n\nUnlike traditional software systems where inputs and outputs can be more easily defined and tested, LLMs produce outputs that are often nuanced, diverse, and context-dependent. 
Testing these models requires comprehensive strategies that account for subjective and variable interpretations of quality, as well as the stochastic nature of their outputs. We, therefore, cannot judge the quality of an LLM’s output in an individual or anecdotal fashion; instead, we need to be able to examine the overall pattern of an LLM's behavior. To get a sense of those patterns, we need to test at scale. Testing at scale refers to the process of evaluating the performance, reliability, and robustness of a system or application across a large and diverse array of datasets and use cases. Our [Centralized Evaluation Framework (CEF)](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/) utilizes thousands of prompts tied to dozens of use cases to allow us to identify significant patterns and assess the overall behavior of our foundational LLMs and the GitLab Duo features in which they are integrated.\n\nTesting at scale helps us:\n\n- **Ensure quality:** Testing at scale enables us to assess the quality and reliability of these models across a wide range of scenarios and inputs. By validating the outputs of these models at scale, we can start to identify patterns and mitigate potential issues such as systematic biases, anomalies, and inaccuracies. \n- **Optimize performance:** Scaling up testing efforts allows GitLab to evaluate the performance and efficiency of LLMs under real-world conditions. This includes assessing factors such as output quality, latency, and cost to optimize the deployment and operation of these models in GitLab Duo features.\n- **Mitigate risk:** Testing LLMs at scale helps mitigate the risks associated with deploying LLMs in critical applications. 
By conducting thorough testing across diverse datasets and use cases, we can identify and address potential failure modes, security vulnerabilities, and ethical concerns before they impact our customers.\n\nTesting LLMs at scale is imperative for ensuring their reliability and robustness for deployment within the GitLab platform. By investing in comprehensive testing strategies that encompass diverse datasets, use cases, and scenarios, GitLab is working to unlock the full potential of AI-powered workflows while mitigating potential risks.\n\n### How we test at scale\n\nThese are the steps we take to test LLMs at scale.\n\n#### Step 1: Create a prompt library as a proxy for production\nWhile other companies view and use customer data to train their AI features, GitLab currently does not.  As a result, we needed to develop a comprehensive prompt library that is a proxy for both the scale and activity of production.\n\nThis prompt library is composed of questions and answers. The questions represent the kinds of queries or inputs that we would expect to see in production, while the answers represent a ground truth of what our ideal answer would be. This ground truth answer could also be mentally framed as a target answer. Both the question and the answer may be human generated, but are not necessarily so. These question/answer pairs give us a basis for comparison and a reference frame that allow us to tease out differences between models and features. When multiple models are asked the same question and generate different responses, we can use our ground truth answer to determine which model has provided an answer that is most closely aligned to our target and score them accordingly.\n\nAgain, a key element of a comprehensive prompt library is ensuring that it is representative of the inputs that we expect to see in production. We want to know how well foundational models fit to our specific use case, and how well our features are performing. 
There are numerous benchmark prompt datasets, but those datasets may not be reflective of the use cases that we see for features at GitLab. Our prompt library is designed to be specific to GitLab features and use cases.\n\n#### Step 2: Baseline model performance\n\nOnce we have crafted a prompt library that accurately reflects production activity, we feed those questions into [various models](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/foundation_models/) to test how well they serve our customer’s needs. We compare each response to our ground truth and provide it a ranking based on a series of metrics including: [Cosine Similarity Score](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/metrics/#similarity-scores), [Cross Similarity Score](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/metrics/#cross-similarity-score), [LLM Judge](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/metrics/#llm-judge), and [Consensus Filtering with an LLM Judge](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/metrics/#consensus-filtering-with-llm-judge). This first iteration provides us a baseline for how well each model is performing, and guides our selection of a foundational model for our features. For brevity, we won’t go into the details here, but we encourage you to [learn more about the metrics here](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/metrics/). It is important to note this isn’t a solved problem; the wider AI industry is actively researching and developing new techniques. GitLab’s model validation team keeps a pulse on the industry and is continuously iterating on how we measure and score the LLMs GitLab Duo uses.  \n\n#### Step 3: Feature development\n\nNow that we have a baseline for our selected model's performance, we can start developing our features with confidence. 
While prompt engineering gets a lot of buzz, focusing entirely on changing the behavior of a model via prompting (or any other technique) without validation means that you are operating in the dark and very possibly overfitting your prompting. You may solve one problem, but be causing a dozen more. You would never know. Creating a baseline for a model's performance allows us to track how we are changing behavior over time for all our necessary use cases. At GitLab, we re-validate the performance of our features on a daily basis during active development to help ensure that all changes improve the overall functionality.\n\n#### Step 4: Iterate, iterate, iterate\n\nHere is how our experimental iterations work. Each cycle, we examine the scores from our tests at scale to identify patterns:\n\n- What are the commonalities across our weakest areas?\n- Is our feature performing poorly based on a specific metric or on a certain use case?\n- Do we see consistent errors popping up in response to a certain kind of question?\n\nOnly when we test at scale do these kinds of patterns begin to emerge and allow us to focus our experiments. Based on these patterns, we propose a variety of experiments or approaches to try to improve performance in a specific area and on a specific metric.\n\nHowever, testing at scale is both expensive and time-consuming. To enable faster and less expensive iteration, we craft a smaller scale dataset to act as a mini-proxy. The focused subset will be weighted to include question/answer pairs that we know we want to improve upon, and the broader subset will also include sampling of all the other use cases and scores to ensure that our changes aren't adversely affecting the feature broadly. Make your change and run it against the focused subset of data. How does the new response compare to the baseline? 
How does it compare to the ground truth?\n\nOnce we have found a prompt that addresses the specific use case we are working on with the focused subset, we validate that prompt against a broader subset of data to help ensure that it won’t adversely affect other areas of the feature. Only when we believe that the new prompt improves our performance in our target area through validation metrics AND doesn’t degrade performance elsewhere, do we push that change to production.\n\nThe entire Centralized Evaluation Framework is then run against the new prompt and we validate that it has increased the performance of the entire feature against the baseline from the day before. In this way, GitLab is constantly iterating to help ensure that you are getting the latest and greatest performance of AI-powered features across the GitLab ecosystem. This allows us to ensure that we keep working faster, together.\n\n### Making GitLab Duo even better\n\nHopefully this gives you insight into how we’re responsibly developing GitLab Duo features. This process has been developed as we’ve brought [GitLab Duo Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/) and [GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) to general availability. We’ve also integrated this validation process into our development process as we iterate on GitLab Duo features. It’s a lot of trial and error, and many times fixing one thing breaks three others. 
But we have data-driven insights into those impacts, which helps us ensure that GitLab Duo is always getting better.\n\n> Start a [free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/#free-trial) today!\n\n ## Resources\n - [GitLab AI Transparency Center](https://about.gitlab.com/ai-transparency-center/)\n - [GitLab's AI Ethics Principles for Product Development](https://handbook.gitlab.com/handbook/legal/ethics-compliance-program/ai-ethics-principles/)\n - [GitLab AI-powered Direction page](https://about.gitlab.com/direction/ai-powered/)\n\n\u003Cfigure class=video_container>\n\u003Ciframe width=560 height=315 src=\"https://www.youtube-nocookie.com/embed/LifJdU3Qagw?si=A4kl6d32wPYC4168\" title=\"YouTube video player\" frameborder=0 allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen=\"\">\u003C/iframe>\n\u003C/figure>\n\n## Read more of the \"Developing GitLab Duo\" series\n\n- [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n- [Developing GitLab Duo: How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features/) \n- [Developing GitLab Duo: Secure and thoroughly test AI-generated code](https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code/)\n- [Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/)",[9,716,496,717,1082],{"slug":1305,"featured":91,"template":693},"developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale","content:en-us:blog:developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale.yml","Developing Gitlab Duo How We Validate And Test Ai Models At 
Scale","en-us/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale.yml","en-us/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale",{"_path":1311,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1312,"content":1317,"config":1323,"_id":1325,"_type":14,"title":1326,"_source":16,"_file":1327,"_stem":1328,"_extension":19},"/en-us/blog/developing-gitlab-duo-series",{"title":1313,"description":1314,"ogTitle":1313,"ogDescription":1314,"noIndex":6,"ogImage":704,"ogUrl":1315,"ogSiteName":706,"ogType":707,"canonicalUrls":1315,"schema":1316},"Developing GitLab Duo series","Our unique blog series, written by our Product and Engineering teams, takes you behind the scenes of our AI innovation and guides you through our newest AI features powering your DevSecOps workflow.","https://about.gitlab.com/blog/developing-gitlab-duo-series","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo series\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab Team\"}],\n        \"datePublished\": \"2024-06-03\"\n      }",{"title":1313,"description":1314,"authors":1318,"heroImage":704,"date":1320,"body":1321,"category":10,"tags":1322},[1319],"GitLab Team","2024-06-03","Generative AI marks a monumental shift in the software development industry, making it easier to develop, secure, and operate software. Our blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers.\n\n> Live demo! Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Register today](https://about.gitlab.com/seventeen/)!\n\n## 1. 
[How we validate and test AI models at scale](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale/)\n\n- Our blog series debuts with a behind-the-scenes look at how we evaluate LLMs, match them to use cases, and fine-tune them to produce better responses for users.\n\n## 2. [AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n\n- We spotlight a new feature that provides detailed metrics, such as the Code Suggestions Usage Rate, to help understand the effectiveness of AI investments.\n\n## 3. [How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features/)\n\n- We share real-world examples of how we integrate AI throughout our software development lifecycle and how we use metrics to gauge their success.\n\n## 4. [Secure and thoroughly test AI-generated code](https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code/)\n\n- Learn step-by-step how to enhance AI-generated code reliability and security using GitLab Duo and GitLab Pages (includes code samples and prompts).\n\n## 5. [Blending AI and Root Cause Analysis to fix CI/CD pipelines](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/)\n\n- Discover how we've infused Root Cause Analysis with AI to help remedy broken CI/CD pipelines, including example scenarios and take-away exercises.\n\n## 6. [Developing GitLab Duo: A roundup of recent Chat enhancements](https://about.gitlab.com/blog/developing-gitlab-duo-a-roundup-of-recent-chat-enhancements)\n- Discover the latest improvements to GitLab Duo Chat, including prompt cancellation and architectural upgrades. 
Learn how these updates streamline workflows and boost productivity.\n\n> Learn more about [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our AI-powered suite of features for your DevSecOps workflow. Then start [a free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/#free-trial) to get the incredible benefits in your own organization! \n\n##  7. [Developing GitLab Duo: Use AI to remediate security vulnerabilities](https://about.gitlab.com/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities)\nThis tutorial shows how GitLab Duo Vulnerability Explanation and GitLab Duo Vulnerability Resolution, along with our other AI-powered features, can help to address vulnerabilities quickly.",[9,717,716,496],{"slug":1324,"featured":6,"template":693},"developing-gitlab-duo-series","content:en-us:blog:developing-gitlab-duo-series.yml","Developing Gitlab Duo Series","en-us/blog/developing-gitlab-duo-series.yml","en-us/blog/developing-gitlab-duo-series",{"_path":1330,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1331,"content":1337,"config":1343,"_id":1345,"_type":14,"title":1346,"_source":16,"_file":1347,"_stem":1348,"_extension":19},"/en-us/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities",{"title":1332,"description":1333,"ogTitle":1332,"ogDescription":1333,"noIndex":6,"ogImage":1334,"ogUrl":1335,"ogSiteName":706,"ogType":707,"canonicalUrls":1335,"schema":1336},"Developing GitLab Duo: Use AI to remediate security vulnerabilities ","This tutorial shows how GitLab Duo Vulnerability Explanation and GitLab Duo Vulnerability Resolution, along with our other AI-powered features, can help to address vulnerabilities quickly.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098106/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750098106040.png","https://about.gitlab.com/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities","\n                  
      {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: Use AI to remediate security vulnerabilities \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"},{\"@type\":\"Person\",\"name\":\"Alana Bellucci\"}],\n        \"datePublished\": \"2024-07-15\"\n      }",{"title":1332,"description":1333,"authors":1338,"heroImage":1334,"date":1340,"body":1341,"category":10,"tags":1342},[1262,1339],"Alana Bellucci","2024-07-15","You’ve just started into a new job, and on your first day, a large-scale\nproduction incident requires all hands on deck. There are a number of\ncritical new vulnerabilities that require immediate attention, analysis,\nmitigation and remediation. Where do you start your investigation? \n\n\nLearn how GitLab Duo Vulnerability Explanation and GitLab Duo Vulnerability\nResolution, along with our other AI-powered features, can help you begin\naddressing vulnerabilities in minutes. You will learn how to benefit from\nAI-powered assistance to analyze and explain vulnerabilities in a practical\nexample. Additional remediation is highlighted with AI-generated code fixes\nin MRs to aid faster vulnerability resolution.\n\n\n> Start [a free trial of GitLab\nDuo](https://about.gitlab.com/gitlab-duo/#free-trial) to bring these\npowerful vulnerability remediation benefits to your own organization!\n\n\n## How to get started: Analyze\n\n\nThe first step is to analyze the impact and severity of the vulnerability.\nOpen the GitLab UI and navigate into the [vulnerability\nreport](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/)\nin the `Secure > Vulnerability Report` menu. 
Filter the vulnerability list\nby `SAST`, and identify the most critical vulnerabilities to work on.\n\n\n![Vulnerability reports\noverview](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/vulnerability_reports_overview_aHR0cHM6_1750098116056.png)\n\n\nThe SAST scanner results are summarized in the detail view, linking to the\nsource code. They provide details from publicly available security\nadvisories. As a developer, it is often hard to start the analysis from the\nsecurity report, unless you are fully aware of the attack scope, technical\ndetails, and vulnerable environments.\n\n\n## Understand and mitigate with Vulnerability Explanation \n\n\nUnderstanding the vulnerability and how to fix it in the best and most\nefficient way is crucial. Fixes must not break existing functionality. If\nthey do, a discussion with maintainers and product owners will be necessary,\nand, as such, will require a high-level summary and potential mitigation\nalternatives. Code that someone who left the company wrote or code that has\nno tests can make the planning for a fix even more difficult. \n\n\nAI-powered Vulnerability Explanation helps with a summary of how an attacker\ncan exploit the vulnerability, and provides more explanations about the\nimpact and potential fixes. \n\n\nThe following example shows an OS Command Injection vulnerability, using\nthis code snippet:\n\n\n```php\n\n\u003C?php \n\n\n// Read variable name from GET request\n\n$name = $_GET['name'];\n\n\n// Use the variable name to call eval and print its value \n\neval('echo $' . $name . ';');\n\n```\n\n\nThe vulnerability report does not go into much detail, and requires\nunderstanding of the full context and impact. Select `Explain vulnerability`\nfrom the upper right corner, which will open GitLab Duo Chat with a\npre-defined prompt action. 
This will give an additional summary of the\nvulnerability, describe how the vulnerability can be exploited, and provide\na suggested fix. \n\n\n![Improper Neutralization of\n\nSpecial Elements used in an OS Command\n\n('OS Command Injection')\n](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098116057.png)\n\n\n### Make Vulnerability Explanation a conversation with context \n\n\nYou’ll also recognize a change in UX: The previous vulnerability explanation\noverlay was replaced with a GitLab Duo Chat workflow. Sometimes, a complex\nvulnerability unfolds into multiple mitigation steps, or unclear source code\npaths.\n\n\nYou can navigate into the source code tree, and continue with the same Chat\ncontext to explain, fix, refactor, and test the code. \n\n\nLet’s try the full workflow with an example in C, where security scanning\ndetected a buffer overflow.\n\n\n1. Open the security vulnerability detail view, and select \"Explain\nvulnerability\" on the button in the upper right. This will open up the Chat\nprompt, providing a summary of the problem, potential attack vectors, and a\nproposed fix.\n\n\n![AI for vulnerabilities - image\n4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750098116059.png)\n\n\n2. Review the proposed fix, and ask Chat in a follow-up prompt to share\nalternative paths, using `Can you show an alternative fix using a different\nfunction`. The idea is to learn about alternative functions to `strcpy()`\nthat can be more safe to use. \n\n\n![AI for vulnerabilities - image\n3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098116060.png)\n\n\n3. Chat comes up with an alternative fix using `strlcpy()` in the following\nexample. 
The function only copies as many characters as allowed in the\ntarget string, and always terminates the string with null. It also returns\nthe length of the source string to determine whether the string was\ntruncated. \n\n\n![AI for vulnerabilities - image\n5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098116062.png)\n\n\n4. Next, click on the `Location` file URL to jump into the source code view.\nOpen Chat again, and verify that the previous vulnerability explanation\ncontext is still there. As a next step, we want to add tests before\ncontinuing with a proposed fix. This helps to avoid breaking functionality\nor introducing regressions. For example, use this Chat prompt: `Based on the\nvulnerability context and opened source code, how would you add tests for\nit?`.\n\n\n![AI for vulnerabilities - image 7\n](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098116063.png)\n\n\n5. After generating tests (and assuming they were added now), you can also\nask Chat to refactor the source code, using the prompt `Can you refactor the\nsource code too?` in the same session.\n\n\n![AI for vulnerabilities - image\n6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098116063.png)\n\n\nThe workflow shows how to analyze, understand, mitigate, get alternative\napproaches, add tests, and even refactor fixes for vulnerabilities. \n\n\nYou can continue this path using Chat, and then switch into the Web IDE to\nmodify the source code after learning how to do it. Additional continued\nworkflows include committing changes and triggering CI/CD and security scans\nfor the full DevSecOps lifecycle loop. 
\n\n\n## Remediate with AI-assisted Vulnerability Resolution \n\n\nUnderstanding and mitigating a security vulnerability still requires\nengineering work to create a fix for the problem, run pipelines and security\nscanning in a new merge request again. It can also be necessary to deploy\nthe fixes into a staging environment and test them for a longer period of\ntime.\n\n\nAI can help here with generating a proposed fix based on the provided\ncontext of the vulnerability and source code.\n\n\nTip: Think of the most annoying vulnerability you had to fix in your career,\nand re-create the use case example for your GitLab Duo adoption. The [MITRE\nCWE Top 25 of the most dangerous software\nweaknesses](https://cwe.mitre.org/top25/archive/2023/2023_top25_list.html)\nalso provides a good starting point.  \n\n\nThe following example implements [CWE-328: Use of a weak hash\nfunction](https://cwe.mitre.org/data/definitions/328.html) by using `md5`.\nIt is correctly identified by [SAST\nscanning](https://docs.gitlab.com/ee/user/application_security/sast/). \n\n\n```python\n\nimport hashlib\n\n\nclass User:\n    def __init__(self, username, password):\n        self.username = username\n        self.password = password\n\n    def set_password(self, password):\n        self.password = hashlib.md5(password.encode()).hexdigest()\n```\n\n\n![AI for vulnerabilities - image\n8](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098116064.png)\n\n\nClick on the button in the upper right `Resolve with merge request`.  This\nwill open an MR that uses AI to propose the fix. For this vulnerability, one\npossible fix could be using a different hash function. 
\n\n\n![AI for vulnerabilities - image\n9](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098116065.png)\n\n\nAnother common vulnerability example is not checking function error codes or\npotential exceptions. The following C code snippets implement an example for\ntiming attacks against file operations with\n[CWE-362](https://cwe.mitre.org/data/definitions/362.html) for the `fopen()`\nand `chmod()` calls. \n\n\n```c\n\n#include \u003Cstdio.h>\n\n#include \u003Cstring.h>\n\n#include \u003Csys/mman.h>\n\n#include \u003Csys/stat.h>\n\n#include \u003Cunistd.h>\n\n\nint main(int argc, char **argv) {\n\n    // File operations\n    char *fname = \"gitlab.keksi\";\n\n    FILE *fp;\n    fp = fopen(fname, \"r\");\n    fprintf(fp, \"Hello from GitLab Duo Vulnerability Resolution Challenge\");\n    fclose(fp);\n\n    // Potential chmod() timing attacks    \n\n    // Make the file world readable\n    chmod(fname, S_IRWXU|S_IRWXG|S_IRWXO);\n\n    return 0;\n}\n\n```\n\n\nThe SAST report for `chmod()` can look like the following: \n\n\n![AI for vulnerabilities - image\n10](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098116065.png)\n\n\nThe proposed `chmod()` merge request includes error handling, and fixes\nanother potential issue with world writable files, changing the permissions\nfrom `777` to `600`.\n\n\n![AI for vulnerabilities - image\n11](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098116066.png)\n\n\n> Try this async exercise: Find, analyze, and fix the vulnerability for the\n`fopen()` function.\n\n\n## More AI assistance required from GitLab Duo \n\n\nOften, a security problem can be resolved with a quick fix or a workaround\nthat grants the development teams time to discuss and plan a more long-term\nsolution. 
In other cases, the problem becomes more complex and requires\nfeature APIs disabled, or firewall mitigation, until a proper fix can be\nrolled into production.\n\n\nGitLab Duo offers additional AI-powered features that can help resolve these\nissues. \n\n\n**Code Explanation:** As a developer or security engineer, it's crucial to\nfeel confident in the changes you've made. Within the IDE, you can use the\n[Code Explanation\nfeature](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#explain-code-in-the-ide)\nto gain a deeper understanding of the AI-suggested fix for the\nvulnerability. This ensures you know exactly what adjustments have been made\nand why.\n\n\n**Root Cause Analysis:** If the fix breaks your pipeline, you can utilize\nthe [Root Cause Analysis\nfeature](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/).\nThis tool helps identify and explain the underlying problem, allowing you to\naddress it effectively. After applying the necessary corrections, you can\nrerun the tests to ensure a successful resolution.\n\n\n**Refactor:** Even if the vulnerability has been fixed, it's worth\nconsidering if the code can be written in a safer manner. In the IDE, you\ncan open GitLab Duo Chat and use the [refactor\naction](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#refactor-code-in-the-ide)\nto explore alternative, more secure ways to write your code. This proactive\napproach helps maintain a robust and secure codebase.\n\n\nBy leveraging these GitLab Duo features, you can confidently navigate and\nresolve vulnerabilities, ensuring your code remains secure and efficient.\n\n\n## What’s next?\n\n\nWe plan to bring both Vulnerability Explanation and Vulnerability Resolution\n\"left\" by incorporating them directly into the MR process. 
This integration\nensures that you can address and resolve vulnerabilities earlier in the\ndevelopment cycle, streamlining your workflow and enhancing code security\nfrom the outset.\n\n\n## Get started with GitLab Duo\n\n\nPlease see our\n[documentation](https://docs.gitlab.com/ee/user/gitlab_duo/turn_on_off.html)\non how to enable the feature available to our GitLab Ultimate customers.\nAlso, GitLab Duo [Vulnerability\nExplanation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#explaining-a-vulnerability)\nand [Vulnerability\nResolution](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#vulnerability-resolution)\nwill soon be coming to GitLab self-managed and GitLab Dedicated.\n\n\nYou can keep up with what's new in GitLab Duo by [following the \"Developing\nGitLab Duo\" blog\nseries](https://about.gitlab.com/blog/developing-gitlab-duo-series/).\n\n\n> Start [a free trial of GitLab\nDuo](https://about.gitlab.com/gitlab-duo/#free-trial) to bring these\npowerful vulnerability remediation benefits to your own organization!\n",[9,691,738,717,737],{"slug":1344,"featured":91,"template":693},"developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities","content:en-us:blog:developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities.yml","Developing Gitlab Duo Use Ai To Remediate Security Vulnerabilities","en-us/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities.yml","en-us/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities",{"_path":1350,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1351,"content":1357,"config":1363,"_id":1365,"_type":14,"title":1366,"_source":16,"_file":1367,"_stem":1368,"_extension":19},"/en-us/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws",{"title":1352,"description":1353,"ogTitle":1352,"ogDescription":1353,"noIndex":6,"ogImage":1354,"ogUrl":1355,"ogSiteName":706,"ogType":707,"canonicalUrls":1355,"schema":1356},"DevSecOps + 
Agentic AI: Now on GitLab Self-Managed Ultimate on AWS","Start using AI-powered, DevSecOps-enhanced agents in your AWS GitLab Self-Managed Ultimate instance. Enjoy the benefits of GitLab Duo and Amazon Q in your organization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659604/Blog/Hero%20Images/Screenshot_2024-11-27_at_4.55.28_PM.png","https://about.gitlab.com/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps + Agentic AI: Now on GitLab Self-Managed Ultimate on AWS\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jackie Porter\"}],\n        \"datePublished\": \"2025-01-16\"\n      }",{"title":1352,"description":1353,"authors":1358,"heroImage":1354,"date":1360,"body":1361,"category":10,"tags":1362},[1359],"Jackie Porter","2025-01-16","We are thrilled to announce the GitLab Duo with Amazon Q offering, previously [shared at AWS 2024 re:Invent](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai/), is now available in Preview (Beta) for GitLab Self-Managed Ultimate users, at no additional cost. This milestone brings AI agentic experiences to organizations that maintain their own GitLab instance.\n\n### What does this mean for GitLab Self-Managed Ultimate customers? \n\nBeginning in our [17.8 release](https://about.gitlab.com/releases/2025/01/16/gitlab-17-8-released/), GitLab Self-Managed Ultimate customers can now take advantage of the GitLab Duo with Amazon Q Preview (Beta) [capabilities](https://docs.gitlab.com/ee/user/duo_amazon_q/). 
There are three key experiences you will be able to access: \n- AI-powered feature development: Use the `/q dev` quick action to transform requirements into merge-ready code.\n- Automated code reviews: Leverage `/q review` for instant, intelligent feedback on code quality and security.\n- Java modernization: Streamline Java application upgrades with `/q transform`.\n\n### Getting started with the Preview (Beta) \n\nTo use these capabilities in your GitLab Self-Managed Ultimate instance:\n\n- Ensure you meet the [prerequisites](https://docs.gitlab.com/ee/user/duo_amazon_q/setup.html#prerequisites), including upgrading to GitLab 17.8, have an Ultimate subscription (no trial access), and have the instance hosted on AWS. \n- Enable your GitLab Duo with Amazon Q integration settings.\n- Configure IAM identity and roles in AWS and the GitLab AI gateway.\n- Add the Amazon Q user to the project. \n\nFor more detailed setup information, see our [documentation](https://docs.gitlab.com/ee/user/duo_amazon_q/setup.html). \n\n### Looking ahead\n\nThis Preview release represents our commitment to bringing enterprise-grade AI capabilities to all GitLab Ultimate customers. We're excited to work closely with our customers during this Preview (Beta) period to ensure GitLab Duo with Amazon Q delivers a superior experience. \nWe encourage GitLab Self-Managed Ultimate customers to begin exploring these capabilities and provide feedback. Your input will be invaluable in shaping the future of AI-powered development in GitLab.\n\n### Get started today \n\nGitLab Self-Managed Ultimate customers can begin enabling and configuring GitLab Duo with Amazon Q as outlined in our [setup documentation](https://docs.gitlab.com/ee/user/duo_amazon_q/setup.html). To learn more about how the Preview (Beta) release can transform your software development, visit our [website](https://about.gitlab.com/partners/technology-partners/aws/#interest). 
\nStay tuned for regular updates as we continue to enhance and expand the capabilities of GitLab Duo with Amazon Q.\n",[9,496,738,835,1181],{"slug":1364,"featured":6,"template":693},"devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws","content:en-us:blog:devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws.yml","Devsecops Agentic Ai Now On Gitlab Self Managed Ultimate On Aws","en-us/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws.yml","en-us/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws",{"_path":1370,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1371,"content":1377,"config":1383,"_id":1385,"_type":14,"title":1386,"_source":16,"_file":1387,"_stem":1388,"_extension":19},"/en-us/blog/devsecops-platforms-give-smbs-security-muscle",{"title":1372,"description":1373,"ogTitle":1372,"ogDescription":1373,"noIndex":6,"ogImage":1374,"ogUrl":1375,"ogSiteName":706,"ogType":707,"canonicalUrls":1375,"schema":1376},"DevSecOps platforms give SMBs security muscle","A single platform enables teams to build, test, and deploy secure software with fewer resources.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667094/Blog/Hero%20Images/container-security.jpg","https://about.gitlab.com/blog/devsecops-platforms-give-smbs-security-muscle","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps platforms give SMBs security muscle\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2023-01-10\",\n      }",{"title":1372,"description":1373,"authors":1378,"heroImage":1374,"date":1380,"body":1381,"category":1201,"tags":1382},[1379],"Sharon Gaudin","2023-01-10","\nDevOps professionals with both security training and experience come at a high price and can be hard to find. 
That makes it especially difficult for startups and small and medium-sized businesses (SMBs), which generally don’t have deep pockets, to get the security professionals they need.\n\nSmaller businesses often end up with no security team, so they have to hire consultants. Even worse, they might end up having little to no security help at all, which will cause problems for their customers as well as their own business.\n\nOne efficient [way to deal with that](/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform/) is to adopt a DevSecOps platform, which enables organizations to build, test, and deploy secure software with fewer hands and [expenses](/blog/how-smbs-can-save-with-gitlabs-devops-platform/).\n\n“Someone in an SMB likely doesn’t have enough expertise, or even enough people, in-house to handle every part of DevOps, so they end up having to hire a contractor or consultant to take on things like security and monitoring, and that strains their budget,” says [Fatima Sarah Khalid](https://gitlab.com/sugaroverflow), a developer evangelist at GitLab. “By adopting the GitLab DevSecOps Platform, they can more easily handle this work, despite limited resources.”\n \n## Four benefits for SMBs\n\nSo how does a complete DevSecOps platform add security muscle to a small business? \n\n### 1. Finding vulnerabilities early\n \nWith a single, end-to-end platform, [security is integrated throughout](/stages-devops-lifecycle/secure/), and not just bolted on as an afterthought. With capabilities like dynamic and static application security testing, vulnerability management, and dependency and container scanning, developers can find vulnerabilities earlier in the process when they often can be more easily and quickly fixed. By shifting security left this way, teams can perform threat and vulnerability analysis as developers create the code - not when it’s about to be deployed. 
Shifting security left also creates more secure software, and decreases the time it would have taken to track down a problem created much earlier in the process.\n \n### 2. Easing work with automation\n \nAutomation, which is built into a single DevSecOps platform, is critical because it brings consistency and repeatability to the entire software lifecycle, reducing the potential for human error and minimizing the introduction of bugs and risks. And that enables SMBs to produce more secure software for their own organizations, as well as for their customers.\n \nAnother major advantage of automation is that it minimizes the need for a lot of extra hands-on and time-consuming work, like code reviews and testing. Startups and small businesses, by nature, have smaller DevOps teams. They might even have an IT team of one or two people, who do everything from building software to serving as the help desk. Saving them from having to do repetitive manual work gives them back precious time they can spend on more innovative and productive jobs.\n \nAll of that automated testing is automatically logged and documented, helping organizations create easily searchable and useful best practices that will help speed future software builds.\n \n### 3. Ensuring compliance\n \nSMBs and companies just getting off the ground don’t want to get tripped up by tricky and costly compliance issues. Luckily, the same end-to-end platform enables teams to verify the compliance of their code without leaving their workflow. In GitLab, for example, compliance confirmation lives within the platform and is automated. Developers don't have to context-switch among different point solutions, boosting their productivity and efficiency. Automating compliance also removes one more task from [developers’ already busy schedules](/blog/ease-pressure-on-smb-developers-with-a-devops-platform/).\n \n### 4. 
Establishing security imperatives\n \nA DevSecOps platform gives SMBs speed and efficiency, without requiring them to string together various security tools or hire security consultants. With a platform, because security practices and automation are integrated from the very start, an SMB’s DevOps environment has a solid security foundation. One solution. One answer to security needs.\n\n## Meeting the security need\n \nIn today’s environment, security and compliance are business imperatives. There’s no getting around it.\n\nSo having a strategic, end-to-end platform approach, where security and compliance are embedded from planning to production, provides efficiency and value unmatched by traditional, third-party application security vendors. Companies that may be using DevOps but are only tacking together different tools simply aren’t getting the security advantages that come from a single DevSecOps application.\n\nStartups and SMBs have a steep hill to climb just to survive. Between March 2020 and March 2021, 1 million small businesses opened in the U.S., but 833,458 closed, according to the U.S. Small Business Administration. And in a volatile economic climate, survival gets even tougher. Today’s high inflation rates and market instability have small businesses bracing for economic uncertainties, according to the [MetLife & U.S. Chamber of Commerce Small Business Index](https://www.uschamber.com/sbindex/summary).\n\nAnd those numbers are just about sheer survival. That’s not to mention actually gaining a solid foothold in an organization’s industry, attracting loyal customers, and successfully taking on bigger competitors, which just makes the hill small businesses are climbing even steeper.\n\nPrepare to make that climb easier by migrating to a single, end-to-end platform. 
[Download our SMB-focused ebook](https://page.gitlab.com/resources-ebook-trading-diy-devops-for-a-single-platform-smb.html) learn the advantages to moving from a DIY DevOps toolchain to GitLab’s platform.\n",[917,716,691,9],{"slug":1384,"featured":6,"template":693},"devsecops-platforms-give-smbs-security-muscle","content:en-us:blog:devsecops-platforms-give-smbs-security-muscle.yml","Devsecops Platforms Give Smbs Security Muscle","en-us/blog/devsecops-platforms-give-smbs-security-muscle.yml","en-us/blog/devsecops-platforms-give-smbs-security-muscle",{"_path":1390,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1391,"content":1397,"config":1404,"_id":1406,"_type":14,"title":1407,"_source":16,"_file":1408,"_stem":1409,"_extension":19},"/en-us/blog/empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners",{"title":1392,"description":1393,"ogTitle":1392,"ogDescription":1393,"noIndex":6,"ogImage":1394,"ogUrl":1395,"ogSiteName":706,"ogType":707,"canonicalUrls":1395,"schema":1396},"GPU-enabled runners for ModelOps and HPC workloads in CI/CD","Learn how to leverage our GitLab-hosted GPU-enabled runners for ModelOps and high-performance computing workloads.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682702/Blog/Hero%20Images/gitlab-data-science-icon.png","https://about.gitlab.com/blog/empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Empower ModelOps and HPC workloads with GPU-enabled runners integrated with CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabriel Engel\"}],\n        \"datePublished\": \"2023-07-06\",\n      }",{"title":1398,"description":1393,"authors":1399,"heroImage":1394,"date":1401,"body":1402,"category":10,"tags":1403},"Empower ModelOps and HPC workloads with GPU-enabled runners integrated with CI/CD",[1400],"Gabriel Engel","2023-07-06","\u003Ci>This blog post 
is the latest in an ongoing series about GitLab's journey\nto [build and integrate AI/ML into our DevSecOps\nplatform](/blog/ai-ml-in-devsecops-series/). Start with the first\nblog post: [What the ML is up with DevSecOps and\nAI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature\nblogs from our product, engineering, and UX teams to showcase how we're\ninfusing AI/ML into GitLab.\u003C/i>\n\n\nIn today's fast-paced world, organizations are constantly looking to improve\ntheir [ModelOps](/direction/modelops/) and high-performance computing (HPC)\ncapabilities. Leveraging powerful graphical processing units\n([GPUs](https://www.techtarget.com/searchvirtualdesktop/definition/GPU-graphics-processing-unit))\nhas become a game-changer for accelerating machine learning workflows and\ncompute-intensive tasks. To help meet these evolving needs, we recently\nreleased our first GPU-enabled runners on GitLab.com.\n\n\nSecurely hosting a GitLab Runner environment for ModelOps and HPC is\nnon-trivial and requires a lot of knowledge and time to set up and maintain.\nIn this blog post, we'll look at some real-world examples of how you can\nharness the potential of GPU computing for ModelOps or HPC workloads while\ntaking full advantage of a SaaS solution.\n\n\n## What are GPU-enabled runners?\n\nGPU-enabled runners are dedicated computing resources for the AI-powered\nDevSecOps platform. They provide accelerated processing power for ModelOps\nand HPC such as the training or deployment of large language models\n([LLMs](https://www.techtarget.com/whatis/definition/large-language-model-LLM))\nas part of ModelOps workloads. In the first iteration of releasing\nGPU-enabled runners, [GitLab.com SaaS\noffers](https://docs.gitlab.com/ee/ci/runners/saas/gpu_saas_runner.html) the\nGCP `n1-standard-4` machine type (4 vCPU, 15 GB memory) with 1 NVIDIA T4 (16\nGB memory) attached. 
The runner behaves like a GitLab Runner on Linux, using\nthe docker+machine [executor](https://docs.gitlab.com/runner/executors/). \n\n\n## Using GPU-enabled runners\n\nTo take advantage of GitLab GPU-enabled runners, follow these steps:\n\n\n### 1. Have a project on GitLab.com\n\nAll projects on GitLab.com SaaS with a `Premium` or `Ultimate`\n[subscription](https://about.gitlab.com/pricing/) have the GPU-enabled\nrunners enabled by default - no additional configuration is required.\n\n\n### 2. Create a job running on GPU-enabled runners\n\nCreate a job in your `.gitlab-ci.yml` configuration file, and set the\n[runner\n`tag`](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#use-tags-to-control-which-jobs-a-runner-can-run)\nto the `saas-linux-medium-amd64-gpu-standard` value. \n\n\n```yaml\n\ngpu-job:\n  stage: build\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n```\n\n\n### 3. Select a Docker image with the Nvidia CUDA driver\n\n\nThe CI/CD job runs in an isolated virtual machine (VM) with a\nbring-your-own-image policy as with [GitLab SaaS runners on\nLinux](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html).\nGitLab mounts the GPU from the host VM into your isolated environment. You\nmust use a Docker image with the GPU driver installed to use the GPU. For\nNvidia GPUs, you can use the [CUDA\nToolkit](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda)\ndirectly, or third-party images with Nvidia drivers installed, such as the\n[TensorFlow GPU image](https://hub.docker.com/r/tensorflow/tensorflow/).\n\n\nThe CI/CD job configuration for the Nvidia CUDA base Ubuntu image looks like\nthis:\n\n\n```yaml\n  image: nvcr.io/nvidia/cuda:12.1.1-base-ubuntu22.04\n```\n\n\n### 4. Verify that the GPU is working\n\nTo verify that the GPU drivers are working correctly, you can execute the\n`nvidia-smi` command in the CI/CD job `script` section. 
\n\n\n```yaml\n  script:\n    - nvidia-smi\n```\n\n\n## Basic usage examples\n\nLet's explore some basic scenarios where GPU-enabled runners can supercharge\nyour ModelOps and HPC workloads:\n\n\n### Example 1: ModelOps with Python\n\nIn this example, we train a model on our GPU-enabled runner defined in the\n`train.py` file using the Nvidia CUDA base Ubuntu image mentioned earlier.\n\n\n`.gitlab-ci.yml` file:\n\n```yaml\n\nmodel-training:\n  stage: build\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n  image: nvcr.io/nvidia/cuda:12.1.1-base-ubuntu22.04\n  script:\n    - apt update\n    - apt install -y --no-install-recommends python3 python3-pip \n    - pip3 install -r requirements.txt\n    - python3 --version\n    - python3 train.py\n```\n\n\n### Example 2: Scientific simulations and HPC\n\nComplex scientific simulations require significant computing resources.\nGPU-enabled runners can accelerate these simulations, allowing you to get\nresults in less time.\n\n\n`.gitlab-ci.yml` file:\n\n```yaml\n\nsimulation-run:\n  stage: build\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n  image: nvcr.io/nvidia/cuda:12.1.1-base-ubuntu22.04\n  script:\n    - ./run_simulation --input input_file.txt\n```\n\n\n## Advanced usage examples\n\nLet's go through some real-world scenarios of how we use GPU-enabled runners\nat GitLab.\n\n\n### Example 3: Python model training with a custom Docker image\n\nFor our third example, we will use this [handwritten digit recognition\nmodel](https://gitlab.com/gitlab-org/modelops/demos/handwritten-digit-recognition).\nWe are using this project as a demo to showcase or try out new ModelOps\nfeatures.\n\n\n[Open the\nproject](https://gitlab.com/gitlab-org/modelops/demos/handwritten-digit-recognition)\nand fork it into your preferred namespace. You can follow the next steps\nusing the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/) in the\nbrowser, or clone the project locally to create and edit the files. 
Some of\nthe next steps require you to override existing configuration in the\n`Dockerfile` and `.gitlab-ci.yml`. \n\n\nAs we need more pre-installed components and want to save installation time\nwhen training the model, we decided to create a custom Docker image with all\ndependencies pre-installed. This also gives us full control over the build\nenvironment we use and allows us to reuse it locally without relying on the\n`.gitlab-ci.yml' implementation.\n\n\nIn addition, we are using a more complete pipeline configuration with the\nfollowing stages:\n\n\n```yaml\n\nstages:\n  - build\n  - test\n  - train\n  - publish\n```\n\n\n![GPU pipeline\noverview](https://about.gitlab.com/images/blogimages/2023-07-06-gpu-enabled-runners-for-modelops/pipeline-overview.png)\n\n\n#### Building a custom Docker image\n\nThe first step is to define a `Dockerfile`. In this example, we start with\nthe Nvidia CUDA base Ubuntu image and then install `Python3.10`. Using `pip\ninstall`, we then add all the required libraries specified in a\n`requirements.txt` file.\n\n\n```docker\n\nFROM nvcr.io/nvidia/cuda:12.1.1-base-ubuntu22.04\n\n\n1. Update and install required packages\n\nRUN apt-get update && apt-get install -y \\\n    python3.10 \\\n    python3.10-dev \\\n    python3-pip \\\n    && rm -rf /var/lib/apt/lists/*\n\n2. Set Python 3.10 as the default Python version\n\nRUN ln -s /usr/bin/python3.10 /usr/bin/python\n\n\n3. Copy the requirements.txt file\n\nCOPY requirements.txt /tmp/requirements.txt\n\n\n4. 
Install Python dependencies\n\nRUN pip3 install --no-cache-dir -r /tmp/requirements.txt\n\n```\n\n\nIn the `.gitlab-ci.yml` file we use\n[Kaniko](https://docs.gitlab.com/ee/ci/docker/using_kaniko.html) to build\nthe Docker image and push it into the [GitLab Container\nRegistry](https://docs.gitlab.com/ee/user/packages/container_registry/).\n\n\n```yaml\n\nvariables:\n  IMAGE_PATH: \"${CI_REGISTRY_IMAGE}:latest\"\n  GIT_STRATEGY: fetch\n\ndocker-build:\n  stage: build\n  tags:\n    - saas-linux-medium-amd64\n  image:\n    name: gcr.io/kaniko-project/executor:v1.9.0-debug\n    entrypoint: [\"\"]\n  script:\n    - /kaniko/executor\n      --context \"${CI_PROJECT_DIR}\"\n      --dockerfile \"${CI_PROJECT_DIR}/Dockerfile\"\n      --destination \"${IMAGE_PATH}\"\n      --destination \"${CI_REGISTRY_IMAGE}:${CI_COMMIT_TAG}\"\n  rules:\n    - if: $CI_COMMIT_TAG\n```\n\n\nIn [rules](https://docs.gitlab.com/ee/ci/yaml/#rules) we define to only\ntrigger the Docker image build for a new git tag. The reason is simple - we\ndon't want to run the image build process for every time we train the model.\n\n\nTo start the image build job [create a new Git\ntag](https://docs.gitlab.com/ee/user/project/repository/tags/#create-a-tag).\nYou can either do this by using `git tag -a v0.0.1` command or via UI.\nNavigate into `Code > Tags` and click on `New Tag`. 
As Tag name type\n`v0.0.1` to create a new Git tag and trigger the job.\n\n\nNavigate to `Build > Pipelines` to verify the `docker-build` job status, and\nthen locate the tagged image following [`Deploy > Container\nRegistry`](https://docs.gitlab.com/ee/user/packages/container_registry/).\n\n\n![Docker\nimage](https://about.gitlab.com/images/blogimages/2023-07-06-gpu-enabled-runners-for-modelops/gpu-docker-image.png)\n\n\n#### Testing the Docker image\n\nTo test the image, we will use the following `test-image` job and run\n`nvidia-smi` and check that the GPU drivers are working correctly.\n\n\nThe job configuration in `.gitlab-ci.yml` file looks as follows:\n\n\n```yaml\n\ntest-image:\n  stage: test\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n  image: $IMAGE_PATH\n  script:\n    - nvidia-smi\n  rules:\n    - if: $CI_COMMIT_TAG\n```\n\n\nWe also include container scanning and more [security\nscanning](https://docs.gitlab.com/ee/user/application_security/) templates\nin the `.gitlab-ci.yml` file.\n\n\n```yaml\n\ninclude:\n  - template: Security/Secret-Detection.gitlab-ci.yml\n  - template: Security/Container-Scanning.gitlab-ci.yml\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml\n  - template: Security/SAST.gitlab-ci.yml\n```\n\n\n#### Training the model with our custom Docker image\n\nNow that we have built our Custom docker image, we can train the model\nwithout installing any more dependencies in the job.\n\n\nThe train job in our `.gitlab-ci.yml` looks like this:\n\n\n```yaml\n\ntrain:\n  stage: train\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n  image: $IMAGE_PATH\n  script:\n    - python train_digit_recognizer.py\n  artifacts:\n    paths:\n      - mnist.h5\n    expose_as: 'trained model'\n```\n\n\nNavigate to `Build > Pipelines` to see the job logs.\n\n\n![Train job\nlogs](https://about.gitlab.com/images/blogimages/2023-07-06-gpu-enabled-runners-for-modelops/train-job-log.png)\n\n\nFrom here, you can also inspect the `train` job 
artifacts.\n\n\n#### Publishing the model\n\nIn the last step of our `.gitlab-ci.yml` file, we are going to publish the\ntrained model.\n\n\n```yaml\n\npublish:\n  stage: publish\n  when: manual\n  dependencies:\n    - train\n  image: curlimages/curl:latest\n  script:\n    - 'curl --header \"JOB-TOKEN: $CI_JOB_TOKEN\" --upload-file mnist.h5 \"${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/MNIST-Model/${CI_COMMIT_TAG}/mnist.h5\"'\n```\n\n\nNavigate to `Build > Pipelines` and trigger the `publish` job manually.\nAfter that, navigate into `Deploy > Package Registry` to verify the uploaded\ntrained model.\n\n\n![Package\nRegistry](https://about.gitlab.com/images/blogimages/2023-07-06-gpu-enabled-runners-for-modelops/package-registry.png)\n\n\n### Example 4: Jupyter notebook model training for ML-powered GitLab Issue\ntriage\n\n\nIn the last example, we are using our GPU-enabled runner to train the\ninternal [GitLab model to triage\nissues](https://gitlab.com/gitlab-org/ml-ops/tanuki-stan/-/tree/using-gpu-enabled-runner).\nWe use this model at GitLab to determine and assign issues to the right team\nfrom the context of the issue description.\n\n\nDifferent from the previous examples, we now use the [`tensorflow-gpu`\ncontainer image](https://hub.docker.com/r/tensorflow/tensorflow) and install\nthe\n[requirements](https://gitlab.com/gitlab-org/ml-ops/tanuki-stan/-/blob/using-gpu-enabled-runner/notebooks/requirements.tensorflow-gpu.txt)\nin the job itself.\n\n\n`.gitlab-ci.yml` configuration:\n\n\n```yaml\n\ntrain:\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n  image: tensorflow/tensorflow:2.4.1-gpu\n  script:\n    - nvidia-smi\n    - cd notebooks\n    - pip install -r requirements.tensorflow-gpu.txt\n    - jupyter nbconvert --to script classify_groups.ipynb\n    - apt-get install -y p7zip-full\n    - cd ../data\n    - 7z x -p${DATA_PASSWORD} gitlab-issues.7z\n    - cd ../notebooks\n    - python3 classify_groups.py\n  artifacts:\n    paths:\n      - 
models/\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\" || $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH  \n      when: manual\n      allow_failure: true\n```\n\n\n![TensorFlow\ntrain](https://about.gitlab.com/images/blogimages/2023-07-06-gpu-enabled-runners-for-modelops/tensorflow-train.png)\n\n\nIf you are interested in another Jupyter notebook example, check out our\nrecently published video on [Training ML Models using GPU-enabled\nrunner](https://youtu.be/tElegG4NCZ0).\n\n\n\u003Ciframe width=\"768\" height=\"432\"\nsrc=\"https://www.youtube.com/embed/tElegG4NCZ0\" title=\"YouTube video player\"\nframeborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write;\nencrypted-media; gyroscope; picture-in-picture; web-share\"\nallowfullscreen>\u003C/iframe>\n\n\n## Results\n\nThe integration of GPU-enabled runners on GitLab.com SaaS opens up a new\nrealm of possibilities for ModelOps and HPC workloads.\n\nBy harnessing the power of GPU-enabled runners, you can accelerate your\nmachine learning workflows, enable faster data processing, and improve\nscientific simulations, all while taking full advantage of a SaaS solution\nand avoiding the hurdles of hosting and maintaining your own build hardware.\n\n\nWhen you try the GPU-enabled runners, please share your experience in our\n[feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/403008).\n\n\nCompute-heavy workloads can take a long time. A known problem is timeouts\nafter three hours because of the current [configuration of GitLab SaaS\nrunners](https://docs.gitlab.com/ee/ci/runners/#how-saas-runners-work).\n\nWe plan to release more powerful compute for future iterations to handle\nheavier workloads faster. 
You can follow updates about GPU-enabled runners\nin the [GPU-enabled runners\nepic](https://gitlab.com/groups/gitlab-org/-/epics/8648) and learn more in\nthe [GPU-enabled runners\ndocumentation](https://docs.gitlab.com/ee/ci/runners/saas/gpu_saas_runner.html).\n",[716,9,109,738,717],{"slug":1405,"featured":6,"template":693},"empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners","content:en-us:blog:empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners.yml","Empowering Modelops And Hpc Workloads With Gpu Enabled Runners","en-us/blog/empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners.yml","en-us/blog/empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners",{"_path":1411,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1412,"content":1415,"config":1420,"_id":1422,"_type":14,"title":1423,"_source":16,"_file":1424,"_stem":1425,"_extension":19},"/en-us/blog/enhance-application-quality-with-ai-powered-test-generation",{"noIndex":6,"title":1413,"description":1414},"Enhance application quality with AI-powered test generation","Learn how GitLab Duo with Amazon Q improves the QA process by automatically generating comprehensive unit tests.",{"title":1413,"description":1414,"authors":1416,"heroImage":1354,"date":1417,"body":1418,"category":10,"tags":1419},[733],"2025-07-03","You know how critical application quality is to your customers and reputation. However, ensuring that quality through comprehensive testing can feel like an uphill battle. You're dealing with time-consuming manual processes, inconsistent test coverage across your team, and those pesky issues that somehow slip through the cracks. 
It's frustrating when your rating drops because quality assurance becomes a bottleneck rather than a safeguard.\n\nHere's where [GitLab Duo with Amazon Q ](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), which delivers agentic AI throughout the software development lifecycle for AWS customers, can help transform your QA process. This AI-powered capability can automatically generate comprehensive unit tests for your code, dramatically accelerating your quality assurance workflow. Instead of spending hours writing tests manually, you can let AI analyze your code and create tests that ensure optimal coverage and consistent quality across your entire application.\n\n## How GitLab Duo with Amazon Q works\n\nSo how does this work? Let's walk through the process together.\nWhen you're working on a new feature, you start by selecting the Java class you've added to your project through a merge request. You simply navigate to your merge request and click on the \"Changes\" tab to see the new code you've added.\n\nNext, you invoke Amazon Q by entering a quick action command. All you need to do is type `/q test` in the issue comment box. It's that simple – just a forward slash, the letter \"q\", and the word \"test\".\n\nOnce you hit enter, Amazon Q springs into action. It analyzes your selected code, understanding its structure, logic, and purpose. The AI examines your class methods, dependencies, and potential edge cases to determine what tests are needed.\n\nWithin moments, Amazon Q generates comprehensive unit test coverage for your new class. It creates tests that cover not just the happy path, but also edge cases and error conditions you might have overlooked. 
The generated tests follow your project's existing patterns and conventions, ensuring they integrate seamlessly with your codebase.\n\n## Why use GitLab Duo with Amazon Q?\n\nHere's the bottom line: You started with a critical challenge – maintaining high-quality applications while dealing with time constraints and inconsistent testing practices. GitLab Duo with Amazon Q addresses this by automating the test generation process, ensuring optimal code coverage and consistent testing standards. The result? Issues are detected before deployment, your applications maintain their quality, and you can develop software faster without sacrificing reliability.\n\nKey benefits of this feature:\n\n* Significantly reduces time spent writing unit tests\n* Ensures comprehensive test coverage across your codebase\n* Maintains consistent testing quality across all team members\n* Catches issues before they reach production\n* Accelerates your overall development velocity\n\nReady to see this game-changing feature in action? Watch how GitLab Duo with Amazon Q can transform your quality assurance process:\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/pxlYJVcHY28?si=MhIz6lnHxc6kFhlL\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Get started with GitLab Duo with Amazon Q today\n\nWant to learn more about GitLab Duo with Amazon Q? 
Visit the [GitLab and AWS partner page](https://about.gitlab.com/partners/technology-partners/aws/) for detailed information.\n\n## Agentic AI resources\n- [Agentic AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/)\n- [GitLab Duo with Amazon Q documentation](https://docs.gitlab.com/user/duo_amazon_q/)",[9,738,978,716,737,835],{"featured":91,"template":693,"slug":1421},"enhance-application-quality-with-ai-powered-test-generation","content:en-us:blog:enhance-application-quality-with-ai-powered-test-generation.yml","Enhance Application Quality With Ai Powered Test Generation","en-us/blog/enhance-application-quality-with-ai-powered-test-generation.yml","en-us/blog/enhance-application-quality-with-ai-powered-test-generation",{"_path":1427,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1428,"content":1434,"config":1440,"_id":1442,"_type":14,"title":1443,"_source":16,"_file":1444,"_stem":1445,"_extension":19},"/en-us/blog/enhance-data-security-with-custom-pii-detection-rulesets",{"title":1429,"description":1430,"ogTitle":1429,"ogDescription":1430,"noIndex":6,"ogImage":1431,"ogUrl":1432,"ogSiteName":706,"ogType":707,"canonicalUrls":1432,"schema":1433},"Strengthen data security with custom PII detection rulesets","This tutorial explains how GitLab's customizable Secret Detection rulesets enhance data security by identifying PII patterns in code repositories. 
Learn how AI can help.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097701/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%285%29_1iy516k40hwBDChKcUJ2zb_1750097700983.png","https://about.gitlab.com/blog/enhance-data-security-with-custom-pii-detection-rulesets","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Strengthen data security with custom PII detection rulesets\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2025-04-01\",\n      }",{"title":1429,"description":1430,"authors":1435,"heroImage":1431,"date":1437,"body":1438,"category":691,"tags":1439},[1436],"Fernando Diaz","2025-04-01","Protecting sensitive information is more critical than ever. GitLab's Secret\nDetection feature provides a powerful solution to identify and prevent the\nexposure of sensitive data. This tutorial explores how GitLab Secret\nDetection works, how to create custom rulesets for finding personally\nidentifiable information, and how GitLab Duo Chat can streamline the\ncreation of regex patterns for PII detection.\n\n\n## Understanding GitLab Secret Detection\n\n\n[GitLab Secret\nDetection](https://docs.gitlab.com/user/application_security/secret_detection/)\nis a security scanning feature integrated into the GitLab CI/CD pipeline. It\nautomatically scans your codebase to identify hardcoded secrets,\ncredentials, and other sensitive information that shouldn't be stored in\nyour repository.\n\n\n### Key benefits\n\n\n* **Data breach prevention** detects secrets before they're committed to\nyour repository.  \n\n* **Automated scanning** runs as part of your CI/CD pipeline without manual\nintervention.  \n\n* **Customizable rules** extend detection capabilities with custom\npatterns.  
\n\n* **Compliance support** helps meet regulatory requirements like GDPR,\nHIPAA, and the California Privacy Protection Act.\n\n\n## Create custom rulesets for PII detection\n\n\nWhile GitLab's default secret detection covers common secrets like API keys\nand passwords, you may need custom rules to identify specific types of PII\nrelevant to your organization. \n\n\nTo get started, create a new GitLab project and follow the steps below. You\ncan follow along and see usage examples in our [PII Demo\nApplication](https://gitlab.com/gitlab-da/tutorials/security-and-governance/devsecops/secret-scanning/pii-data-ruleset).\n\n\n**Step 1: Set up Secret Detection**\n\n\nEnsure Secret Detection is enabled in your `.gitlab-ci.yml` file:\n\n\n```\n\ninclude:\n  - template: Security/Secret-Detection.gitlab-ci.yml\n\nsecret_detection:\n  variables:\n    SECRET_DETECTION_EXCLUDED_PATHS: \"rules,.gitlab,README.md,LICENSE\"\n    SECRET_DETECTION_HISTORIC_SCAN: \"true\"\n```\n\n\n**Step 2: Create a custom ruleset file**\n\n\nCreate the directory and file `rules/pii-data-extenson.toml`, which contains\nthe regex patterns for PII data along with an allowlist of patterns to\nignore. 
Below are patterns to detect passport numbers (USA), phone numbers\n(USA), and email addresses:\n\n\n```toml\n\n[extend]\n\n# Extends default packaged ruleset, NOTE: do not change the path.\n\npath = \"/gitleaks.toml\"\n\n\n# Patterns to ignore (used for tests)\n\n[allowlist]\n\ndescription = \"allowlist of patterns and paths to ignore in detection\"\n\nregexTarget = \"match\"\n\nregexes = ['''555-555-5555''', '''user@example.com''']\n\npaths = ['''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)''']\n\n\n# US Passport Number (USA)\n\n[[rules]]\n\nid = \"us_passport_detection\"\n\ntitle = \"US Passport Number\"\n\ndescription = \"Detects US passport numbers\"\n\nregex = '''\\b[A-Z]{1,2}[0-9]{6,9}\\b'''\n\nkeywords = [\"passport\"]\n\n\n# Phone Number (USA)\n\n[[rules]]\n\nid = \"us_phone_number_detection_basic\"\n\ntitle = \"US Phone Number\"\n\ndescription = \"Detects US phone numbers in basic format\"\n\nregex = '''\\b\\d{3}-\\d{3}-\\d{4}\\b'''\n\nkeywords = [\"phone\", \"mobile\"]\n\n\n# Email Address\n\n[[rules]]\n\nid = \"email_address\"\n\ntitle = \"Email Address\"\n\ndescription = \"Detects email addresses\"\n\nregex = '''[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}'''\n\nkeywords = [\"email\", \"e-mail\"]\n\n```\n\n**Step 3: Extend Secret Detection with the custom ruleset file**\n\n\nCreate a directory and file `.gitlab/secret-detection-ruleset.toml` in the\nroot of your repository. 
This file allows you to extend the standard\nconfiguration with the PII rules file, and overwrite the severity of the\ndetected vulnerabilities (default severity is `Critical`).\n\n\n```\n\n# Define the pii rules to add to default configuration\n\n[[secrets.passthrough]]\n\ntype = \"file\"\n\ntarget = \"gitleaks.toml\"\n\nvalue = \"rules/pii-data-extension.toml\"\n\n\n# Overwrite Phone Number (USA) PII Severity\n\n[[secrets.ruleset]]\n\n[secrets.ruleset.identifier]\n\ntype = \"gitleaks_rule_id\"\n\nvalue = \"us_phone_number_detection_basic\"\n\n[secrets.ruleset.override]\n\nseverity = \"Medium\"\n\n\n# Overwrite Email Address PII Severity\n\n[[secrets.ruleset]]\n\n[secrets.ruleset.identifier]\n\ntype = \"gitleaks_rule_id\"\n\nvalue = \"email_address\"\n\n[secrets.ruleset.override]\n\nseverity = \"Low\"\n\n```\n\n\n**Step 4: Commit your changes**\n\n\nNow add the changes in the above steps to your project.\n\n\n```\n\ncd /path/to/your/project\n\ngit add .\n\ngit commit -m \"Add PII data ruleset and Secret Scanning\"\n\ngit push\n\n```\n\n\nOnce the code is committed, Secret Detection will run within the default\nbranch.\n\n\n**Step 5: Test detection of PII data**\n\n\nNow that we have configured the Secret Detection scanner, we should perform\na test to see if the scanner is detecting the new custom patterns. 
This can\nbe done by creating a merge request, which adds a new file named\n`customer-data.yaml` with the following:\n\n\n```yaml  \n\ncustomers:  \n  test_user:  \n    phone_number: 555-555-555  \n    email: user@example.com  \n  justin_case:  \n    phone_number: 512-123-4567  \n    passport_number: A12345678  \n    email: justin_case@example.com  \n  chris_p_bacon: \n    phone_number: 305-123-4567  \n    passport_number: B09876543  \n    email: chris_p_bacon@example.com  \n```\n\n\nThe scanner should now perform the following:\n\n\n* Ignore the `phone_number` and `email` of `test_user` due to patterns being\nin allowlist  \n\n* Detect six potential vulnerabilities due to the information present for\nboth `justin_case` and `chris_p_bacon`\n  * U.S. passport number severity is set to `Critical` (default)  \n  * U.S. phone number severity is set to `Medium` (override)  \n  * Email address severity is set to `Low` (override)\n  * Data from rules override is added to each vulnerability\n\nOnce the [merge\nrequest](https://gitlab.com/gitlab-da/tutorials/security-and-governance/devsecops/secret-scanning/pii-data-ruleset/-/merge_requests/4)\nis submitted, the Secret Detection scanner runs and provides the following\nresults:\n\n\n![Secret Detection finding custom PII data\nMR](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097709/Blog/Content%20Images/Blog/Content%20Images/pii_vulns_aHR0cHM6_1750097709683.png)\n\n\nWhen clicking on a vulnerability, you are presented with detailed\nvulnerability data based on what was configured in your newly set up rules:\n\n\n![Expanded custom PII data\nvulnerability](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097710/Blog/Content%20Images/Blog/Content%20Images/pii_vulns_expanded_aHR0cHM6_1750097709684.png)\n\n\nThis data allows you to determine the validity of the data present and\naddress it accordingly.\n\n\nThere are additional ways to configure custom rulesets. 
For example, rules\ncan be applied remotely to several projects, avoiding the need to duplicate\nthe rules file. See the [Secret Detection Configuration\ndocumentation](https://docs.gitlab.com/user/application_security/secret_detection/pipeline/configure/)\nfor more information.\n\n\n## Common PII types to consider\n\n\nWhen building your custom ruleset, consider including patterns for:\n\n\n* Social Security Numbers  \n\n* Credit card numbers  \n\n* Driver's license numbers  \n\n* Passport numbers  \n\n* Email addresses  \n\n* Phone numbers  \n\n* IP addresses  \n\n* Physical addresses  \n\n* Medical record numbers\n\n\n## Leveraging GitLab Duo Chat for pattern creation\n\n\nCreating effective regex patterns can be challenging. This is where [GitLab\nDuo Chat](https://docs.gitlab.com/user/gitlab_duo_chat/) comes in handy.\nGitLab Duo Chat is an AI-powered assistant that accelerates development with\ncontextual, conversational AI. Chat is available to any user with an\nassigned GitLab Duo seat.\n\n\n### Using GitLab Duo Chat to generate PII regex patterns\n\n\n1. **Access GitLab Duo Chat:** In the upper right corner, select __GitLab\nDuo Chat__. A drawer opens on the right side of your screen.\n\n\n2. **Prompt the chat:** Enter a prompt in the chat input box and press\n__Enter__ or select __Send__:\n\n\n```\n\nI need a regex pattern to detect Canadian Zip Codes in our codebase for\nsecret detection\n\n```\n\n\n3. **Review and refine:** GitLab Duo Chat will suggest a regex pattern that\nyou can review and refine:\n\n\n```\n\nCanadian postal codes follow the format \"A1A 1A1\" (where A is a letter and 1\nis a digit). 
Here's a regex pattern that will detect Canadian postal codes\nin your codebase, accounting for both spaced and non-spaced formats:\n\n[A-Za-z]\\d[A-Za-z][ -]?\\d[A-Za-z]\\d\n\nThis pattern matches:\n\n* A letter, followed by a digit, followed by a letter\n\n* An optional space or hyphen\n\n* A digit, followed by a letter, followed by a digit\n\nIf you need a more strict pattern that only matches uppercase letters (as\nofficial Canadian postal codes use), you can use:\n\n[A-Z]\\d[A-Z][ -]?\\d[A-Z]\\d\n\nFor implementation in a secret detection tool, you might want to add word\nboundaries to avoid partial matches:\n\n\\b[A-Z]\\d[A-Z][ -]?\\d[A-Z]\\d\\b\n\nWould you like more details about implementing this in a specific secret\ndetection system?\n\n```\n\n\n4. **Test the pattern:** Test the pattern against sample data to ensure it\nworks correctly. You can use sites like\n[https://regex101.com/](https://regex101.com/) to validate patterns before\nyou submit them to your ruleset.\n\n\n5. **Add to your ruleset:** Once satisfied, add the pattern to your\n`rules/pii-data-extension.toml` file and perform some tests to validate.\nThis sample [merge\nrequest](https://gitlab.com/gitlab-da/tutorials/security-and-governance/devsecops/secret-scanning/pii-data-ruleset/-/merge_requests/5/diffs)\ncontains a newly added rule for Canadian Zip Codes based on the above.\n\n\nAdditionally, you can use GitLab Duo Chat in:\n\n\n* [The GitLab Web IDE (VS Code in the\ncloud)](https://docs.gitlab.com/user/project/web_ide/)  \n\n* VS Code, with the [GitLab Workflow extension for VS\nCode](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow)  \n\n* JetBrains IDEs, with the [GitLab Duo Plugin for\nJetBrains](https://plugins.jetbrains.com/plugin/22325-gitlab-duo)  \n\n* Visual Studio for Windows, with the [GitLab Extension for Visual\nStudio](https://marketplace.visualstudio.com/items?itemName=GitLab.GitLabExtensionForVisualStudio)\n\n\nIn the future, you’ll be able to 
leverage [GitLab Duo\nWorkflow](https://docs.gitlab.com/user/duo_workflow/) (currently in private\nbeta) to automatically generate and add these patterns to your code base\ndirectly from your IDE. GitLab Duo Workflow is an AI agent, which transforms\nAI from reactive assistant to autonomous contributor, optimizing your\nsoftware development lifecycle. Learn more about [GitLab Duo\nWorkflow](https://about.gitlab.com/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development/).\n\n\n## Best practices for PII detection\n\n\n1. **Start small:** Begin with a few critical PII types and expand\ngradually.  \n\n2. **Test thoroughly:** Test your patterns against sample data to avoid\nfalse positives.  \n\n3. **Update regularly:** Review and update your rulesets as new PII\nrequirements emerge.  \n\n4. **Document patterns:** Maintain documentation for your custom regex\npatterns.  \n\n5. **Balance precision:** Make patterns specific enough to avoid false\npositives but flexible enough to catch variations.  \n\n6. **Implement [Secret Push\nProtection](https://about.gitlab.com/blog/prevent-secret-leaks-in-source-code-with-gitlab-secret-push-protection/):**\nPrevent PII data from making it into your repository.  \n\n7. **Set up [Merge Request Approval\nPolicies](https://docs.gitlab.com/user/application_security/policies/merge_request_approval_policies/):**\nRequire approval before merging any possible PII data to your repository.\n\n\nOnce you have set up a PII data ruleset to meet your organization's needs,\nremote rulesets can scan for PII data across multiple repositories without\nthe need to duplicate the rules file. 
Watch this video to learn more:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/vjJxQz918WE?si=CRdIEodo3ALxVWXO\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## Handling Secret Detection findings\n\n\nWhen GitLab Secret Detection identifies potential PII in your code:\n\n\n1. **Review the finding:** Assess whether it's a legitimate finding or a\nfalse positive.  \n\n2. **Remediate:** Remove the sensitive data and replace it with environment\nvariables or secrets management.  \n\n3. **[Redact text from\nrepository](https://docs.gitlab.com/user/project/merge_requests/revert_changes/#redact-text-from-repository):**\nPermanently delete sensitive or confidential information that was\naccidentally committed, ensuring it's no longer accessible in your\nrepository's history. \n\n4. **Track progress:** Use GitLab's security dashboard to monitor ongoing\ncompliance.\n\n\n## Get started today\n\n\nGitLab Secret Detection, combined with custom PII rulesets, provides a\npowerful defense against inadvertent exposure of sensitive information. By\nleveraging GitLab Duo Chat to create precise regex patterns, teams can\nefficiently implement comprehensive PII detection across their codebase,\nensuring regulatory compliance and protecting user data.\n\n\nRemember that secret detection is just one component of a comprehensive\nsecurity strategy. 
Combine it with other GitLab security features like\nstatic application security testing, dynamic application security testing,\nand dependency scanning for a more robust security posture.\n\n\nStart implementing these practices today to better protect your users'\npersonal information and maintain the security integrity of your\napplications.\n\n\n> Start [a free trial of GitLab Ultimate and GitLab Duo\n](https://about.gitlab.com/free-trial/)today!\n\n\n## More resources\n\n\nTo learn more about GitLab security and compliance and how we can help\nenhance your AppSec workflows, follow the links below:\n\n\n* [GitLab Security and Compliance\nSolutions](https://about.gitlab.com/solutions/security-compliance/)  \n\n* [GitLab DevSecOps Platform](https://about.gitlab.com/platform/)  \n\n* [GitLab Duo (AI)](https://about.gitlab.com/gitlab-duo/)  \n\n* [GitLab Application Security\ndocumentation](https://docs.gitlab.com/user/application_security/) \n\n* [Secret Detection\ndocumentation](https://docs.gitlab.com/user/application_security/secret_detection/)\n",[691,738,737,496,9],{"slug":1441,"featured":91,"template":693},"enhance-data-security-with-custom-pii-detection-rulesets","content:en-us:blog:enhance-data-security-with-custom-pii-detection-rulesets.yml","Enhance Data Security With Custom Pii Detection Rulesets","en-us/blog/enhance-data-security-with-custom-pii-detection-rulesets.yml","en-us/blog/enhance-data-security-with-custom-pii-detection-rulesets",{"_path":1447,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1448,"content":1453,"config":1459,"_id":1461,"_type":14,"title":1462,"_source":16,"_file":1463,"_stem":1464,"_extension":19},"/en-us/blog/explain-this-code",{"title":1449,"description":1450,"ogTitle":1449,"ogDescription":1450,"noIndex":6,"ogImage":868,"ogUrl":1451,"ogSiteName":706,"ogType":707,"canonicalUrls":1451,"schema":1452},"ML experiment: Explain this source code","Learn how GitLab is experimenting with ML-powered source code explanation features 
in this fourth installment of our ongoing AI/ML in DevSecOps series.","https://about.gitlab.com/blog/explain-this-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Explain this source code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2023-04-06\",\n      }",{"title":1449,"description":1450,"authors":1454,"heroImage":868,"date":1456,"body":1457,"category":10,"tags":1458},[1455],"Taylor McCaslin","2023-04-06","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nDeciphering the source code of a new software project can be a daunting or at least time-consuming task. The code may be poorly documented, or it may be written in a programming language that is unfamiliar to the developer. Even if the developer is familiar with the programming language, the code may be complex and difficult to understand. But what if developers had a helpful tool to figure out very quickly what code was doing? With recent advancements in AI models, it's now possible to have code explained in natural language.\n\n## Explain this code with AI\nAt GitLab, we’re experimenting with AI-assisted code explanations. We want to enable software developers to quickly understand source code they encounter. 
Whether it's starting with a new project, contributing to a project in a language they're not fluent in, or just trying to understand historical code, we want to help developers get up to speed quickly.\n\nIn a rapid prototype, our own [Denys Mishunov](https://gitlab.com/mishunov), Staff Frontend Engineer, and [Michael Le](https://gitlab.com/mle), Senior Product Designer for our [Create::Source Code group](https://handbook.gitlab.com/handbook/product/categories/#source-code-group), leverage AI to power code explanations within [GitLab's repository source code file viewer](https://docs.gitlab.com/ee/user/project/repository/).\n\n\n![Prototype UX for Explain this Code](https://about.gitlab.com/images/blogimages/explain-this-code-hr.png){: .shadow}\n\nAbove, you can see an example of highlighting a selection of code and asking for a code explanation. Watch the full demo below.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/xzsFfFqvlnU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Iterating on AI/ML features\nWhile just an experiment today, we are iterating on how to effectively bring features like this to our customers. We're starting on the repository file viewer, and this prototype can be extended to anywhere you interact with code within GitLab, from [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/) to [snippets](https://docs.gitlab.com/ee/user/snippets.html), and beyond.\n\nThis experiment is just the start of the ways we’re looking to infuse GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI Assisted features. We’ll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? 
[Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[917,738,717,9],{"slug":1460,"featured":6,"template":693},"explain-this-code","content:en-us:blog:explain-this-code.yml","Explain This Code","en-us/blog/explain-this-code.yml","en-us/blog/explain-this-code",{"_path":1466,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1467,"content":1473,"config":1478,"_id":1480,"_type":14,"title":1481,"_source":16,"_file":1482,"_stem":1483,"_extension":19},"/en-us/blog/explain-this-vulnerability",{"title":1468,"description":1469,"ogTitle":1468,"ogDescription":1469,"noIndex":6,"ogImage":1470,"ogUrl":1471,"ogSiteName":706,"ogType":707,"canonicalUrls":1471,"schema":1472},"ML experiment: Explain this vulnerability","Learn how GitLab is experimenting with vulnerability explanation and mitigation recommendations in this latest installment of our ongoing 'AI/ML in DevSecOps' series.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664087/Blog/Hero%20Images/tanukicover.jpg","https://about.gitlab.com/blog/explain-this-vulnerability","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Explain this vulnerability\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Alana Bellucci\"}],\n        \"datePublished\": \"2023-05-02\",\n      
}",{"title":1468,"description":1469,"authors":1474,"heroImage":1470,"date":1475,"body":1476,"category":10,"tags":1477},[1339],"2023-05-02","\n\n\u003Ci>This blog is the latest post an ongoing series about GitLab’s journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The first blog post can be found [here](/blog/what-the-ml-ai/). Throughout the series, we’ll feature blogs from our product, engineering, and UX teams to showcase how we’re infusing AI/ML into GitLab.\u003C/i>\n\nGitLab surfaces vulnerabilities that contain relevant information. However, more often users aren't sure where to start. \nIt takes time to research and synthesize information that is surfaced within the vulnerability record. Moreover, figuring out how to fix a given vulnerability can be difficult.\n\nTo help teams identify an effective way to fix a vulnerability within the context of their specific code base, we have released an [experimental](https://docs.gitlab.com/ee/policy/experiment-beta-support.html#experiment)\n feature that provides GitLab AI-assisted vulnerability recommendations leveraging the explanatory power of large language models. This capability combines basic vulnerability \n information with insights derived from the customer's code to explain the vulnerability in context, demonstrate how it can be exploited, and provide an example fix.\n\n[Isaac Dawson](https://gitlab.com/idawson) and [Dinesh Bolkensteyn](https://gitlab.com/dbolkensteyn), both [GitLab Vulnerability Research](/handbook/engineering/development/sec/secure/vulnerability-research/) \nengineers, tested prompts in a large language model to see if prompts could yield helpful results. After fine-tuning the prompts, they found that some prompts could provide better details\n and even suggest recommendations for a fix to vulnerabilities related to static application security testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)). 
\n In a week's time, Product Designer [Becka Lippert](https://gitlab.com/beckalippert) designed a prototype and [Daniel Tian](https://gitlab.com/dftian), \n [Mo Khan](https://gitlab.com/mokhax), and [Neil McCorrison](https://gitlab.com/nmccorrison) built this experimental feature in GitLab.\n\n![Explain and mitigate this vulnerability with AI](https://about.gitlab.com/images/blogimages/2023-04-27-explain-this-vulnerability.png){: .shadow}\n\n\nThis feature is powered by Google AI. Learn more about [our partnership with Google Cloud](https://about.gitlab.com/press/releases/2023-05-02-gitLab-and-google-cloud-partner-to-expand-ai-assisted-capabilities.html) to enrich GitLab features with generative AI.\n\nYou can explore the \"explain this vulnerability\" feature with a [click-through demo](https://go.gitlab.com/0qIe3O).\n\n## Iterating on AI/ML features\n\nThis [experimental](https://docs.gitlab.com/ee/policy/experiment-beta-support.html#experiment) feature is available in GitLab.com today. We are exploring what this feature could look like for \nother types of vulnerabilities beyond SAST and in a merge request. Have an idea that would make this feature better? Please share it with us, along with any feedback, in this \n[issue](https://gitlab.com/gitlab-org/gitlab/-/issues/407295).\n\nThis experiment is just the start of the ways we're looking to infuse GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI Assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? 
[Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[1181,717,716,9],{"slug":1479,"featured":6,"template":693},"explain-this-vulnerability","content:en-us:blog:explain-this-vulnerability.yml","Explain This Vulnerability","en-us/blog/explain-this-vulnerability.yml","en-us/blog/explain-this-vulnerability",{"_path":1485,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1486,"content":1491,"config":1497,"_id":1499,"_type":14,"title":1500,"_source":16,"_file":1501,"_stem":1502,"_extension":19},"/en-us/blog/extending-code-suggestions",{"title":1487,"description":1488,"ogTitle":1487,"ogDescription":1488,"noIndex":6,"ogImage":868,"ogUrl":1489,"ogSiteName":706,"ogType":707,"canonicalUrls":1489,"schema":1490},"ML experiment: Extending Code Suggestions to more development environments","Learn how GitLab is experimenting with extending Code Suggestions to Visual Studio, JetBrains IDE, Neovim, and other environments in our ongoing 'AI/ML in DevSecOps' series.","https://about.gitlab.com/blog/extending-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Extending Code Suggestions to more development environments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kai Armstrong\"}],\n        \"datePublished\": \"2023-06-01\",\n      
}",{"title":1487,"description":1488,"authors":1492,"heroImage":868,"date":1494,"body":1495,"category":10,"tags":1496},[1493],"Kai Armstrong","2023-06-01","\n\n\u003Ci>This blog is the latest post in an ongoing series about GitLab's journey to \u003Ca href=\"/blog/ai-ml-in-devsecops-series/\">build and integrate AI/ML into our DevSecOps platform\u003C/a>. The first blog post can be found \u003Ca href=\"/blog/what-the-ml-ai/\">here\u003C/a>. Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nWe've been continuing to [extend the reach](/blog/code-suggestions-for-all-during-beta/) of GitLab Code Suggestions and make [improvements](/blog/code-suggestions-improves-developer-productivity/) to enhance developer productivity. Continuing with our theme of experimentation and iteration, we're now announcing experimental support for Code Suggestions in Visual Studio, JetBrains IDEs, Neovim, and other development environments.\n\n## Code Suggestions for Visual Studio\n\nIn this rapid prototype, [Michael Eddington](https://gitlab.com/mikeeddington), Staff Backend Engineer, built an extension to bring GitLab Code Suggestions to Visual Studio. With this experiment, you can begin writing code and have suggestions provided to help accelerate your development efforts while you type.\n\n![GitLab Code Suggestions in Visual Studio](https://about.gitlab.com/images/blogimages/code-suggestions-visual-studio-ide.gif){: .shadow}\n\nYou can follow our instructions for [getting started](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-visual-studio-experiment#getting-started) to try this extension out today. 
Provide your feedback about this extension in [this issue](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-visual-studio-experiment/-/issues/1).\n\n## Code Suggestions for JetBrains IDEs\n\nIn this rapid prototype, [Dinesh Bolkensteyn](https://gitlab.com/dbolkensteyn), Senior Vulnerability Researcher, built a plugin to bring GitLab Code Suggestions to JetBrains IDE. With this experiment, you can begin writing code and have suggestions provided to help accelerate your development efforts while you type.\n\n![GitLab Code Suggestions in JetBrains IDE](https://about.gitlab.com/images/blogimages/code-suggestions-jetbrains-ide.gif){: .shadow}\n\nYou can follow our instructions for [getting started](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-jetbrains-experiment#getting-started) to try it out today. Provide your feedback about this extension in [this issue](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-jetbrains-experiment/-/issues/2).\n\n## Code Suggestions Language Server for Neovim and more\n\nIn this rapid prototype, [Julian Thome](https://gitlab.com/julianthome), Staff Vulnerability Research Engineer, and [Michael Henriksen](https://gitlab.com/mhenriksen), Senior Vulnerability Research Engineeer, developed a language server that leverages the Language Server Protocol to provide GitLab Code Suggestions in Neovim or any other [editor with LSP support](https://microsoft.github.io/language-server-protocol/implementors/tools/). \n\n![GitLab Code Suggestions in Neovim](https://about.gitlab.com/images/blogimages/code-suggestions-neovim.gif){: .shadow}\n\nThis language server allows you to configure any supporting editor or IDE to start receiving GitLab Code Suggestions as you type. 
We've provided instructions for getting started with [Neovim](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-language-server-experiment/-/blob/main/docs/nvim.md), [Sublime Text](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-language-server-experiment/-/blob/main/docs/sublime.md), and [Emacs](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-language-server-experiment/-/blob/main/docs/emacs.md) for your convenience. Provide your feedback about these integrations in [this issue](https://gitlab.com/gitlab-org/editor-extensions/experiments/gitlab-code-suggestions-language-server-experiment/-/issues/2).\n\n## Iterating on AI/ML features\n\nWhile these are just experiments today, we are iterating on how to effectively bring mature IDE integrations like these to our customers. We'll continue to refine these integrations and improve the experience to provide you with streamlined code suggestions while you work, wherever you choose to work. We're also working to expand our scope beyond these IDEs so if you have an interest in seeing an additional editor/IDE supported, [let us know](https://gitlab.com/groups/gitlab-org/-/epics/2431).\n\nThis experiment is just the start of the ways we're infusing GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI-assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. 
It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[716,738,717,9],{"slug":1498,"featured":6,"template":693},"extending-code-suggestions","content:en-us:blog:extending-code-suggestions.yml","Extending Code Suggestions","en-us/blog/extending-code-suggestions.yml","en-us/blog/extending-code-suggestions",{"_path":1504,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1505,"content":1508,"config":1515,"_id":1517,"_type":14,"title":1518,"_source":16,"_file":1519,"_stem":1520,"_extension":19},"/en-us/blog/fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab",{"noIndex":6,"title":1506,"description":1507},"Fast and secure AI agent deployment to Google Cloud with GitLab","Follow this step-by-step guide, complete with a demo application, to learn how to use agentic AI, along with GitLab's native integrations and CI/CD components.",{"title":1506,"description":1507,"authors":1509,"heroImage":1511,"date":1512,"body":1513,"category":10,"tags":1514},[1510],"Regnard Raquedan","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670563/Blog/Hero%20Images/cloudcomputing.jpg","2025-07-07","[Agentic AI](https://about.gitlab.com/topics/agentic-ai/) is transforming\nhow we build intelligent applications, but deploying AI agents securely and\nefficiently can be challenging. 
In this tutorial, you'll learn how to deploy\nan AI agent built with Google's Agent Development Kit\n([ADK](https://cloud.google.com/vertex-ai/generative-ai/docs/agent-development-kit/quickstart))\nto Cloud Run using [GitLab's native\nintegrations](https://cloud.google.com/blog/topics/partners/understand-the-google-cloud-gitlab-integration)\nand [CI/CD components](https://docs.gitlab.com/ci/components/).\n\n\n## What are AI agents and why do they matter?\n\n\nAgentic AI represents a significant evolution in artificial intelligence. Unlike traditional generative AI tools that require constant human direction, AI agents leverage advanced language models and natural language processing to take independent action. These systems can understand requests, make decisions, and execute multistep plans to achieve goals autonomously.\n\n\nThis tutorial uses Google's ADK, a flexible and modular framework for developing and deploying AI agents. While optimized for Gemini and the Google ecosystem, ADK is model-agnostic, deployment-agnostic, and built for compatibility with other frameworks.\n\n\n## Our demo application: Canada City Advisor\n\n\nTo demonstrate the deployment process, we'll work with a practical example: the Canada City Advisor. This AI agent helps users find their ideal Canadian city based on their preferences and constraints.\n\n\nHere's how it works:\n\n\n* Users input their budget requirements and lifestyle preferences.  \n\n* The root agent coordinates two sub-agents:  \n\n  * A budget analyzer agent that evaluates financial constraints. This draws data obtained from the Canada Mortgage and Housing Corporation.  \n  * A lifestyle preferences agent that matches cities to user needs. This includes a weather service that uses [Open-Meteo](https://open-meteo.com/) to get the proper city information.  
\n* The system generates personalized city recommendations\n\n\nThis multi-agent architecture showcases the power of agentic AI - different specialized agents working together to solve a complex problem. The sub-agents are only invoked when the root agent determines that budget and lifestyle analysis are needed.\n\n\n![Multi-agent architecture to develop demo application with agentic AI](https://res.cloudinary.com/about-gitlab-com/image/upload/v1751576568/obgxpxvlnxtzifddrrz1.png)\n\n\n## Prerequisites\n\n\nBefore we begin, ensure you have:\n\n\n* A Google Cloud project with the following APIs enabled:  \n\n  * Cloud Run API  \n  * Artifact Registry API  \n  * Vertex AI API  \n* A GitLab project for your source code  \n\n* Appropriate permissions in both GitLab and Google Cloud\n\n\n**Step 1: Set up IAM integration with Workload Identity Federation**\n\n\nThe first step establishes secure, keyless authentication between GitLab and Google Cloud using [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation). This eliminates the need for service account keys and improves security.\n\n\nIn your GitLab project:\n\n\n1. Navigate to **Settings > Integrations > Google Cloud IAM.**  \n\n2. Provide the following information:  \n\n   * **Project ID**: Your Google Cloud project ID  \n   * **Project Number**: Found in your Google Cloud console  \n   * **Pool ID**: A unique identifier for your workload identity pool  \n   * **Provider ID**: A unique identifier for your identity provider\n\nGitLab will generate a script for you. Copy this script and run it in your Google Cloud Shell to create the Workload Identity Federation.\n\n\n**Step 2: Configure Google Artifact Registry integration**\n\n\nNext, we'll set up the connection to Google Artifact Registry where our container images will be stored.\n\n\n1. In GitLab, go to **Settings > Integrations > Google Artifact Registry.**  \n\n2. 
Enter:  \n\n   * **Google Cloud Project ID**: Same as in Step 1  \n   * **Repository Name**: Name of an existing Artifact Registry repository  \n   * **Location**: The region where your repository is located\n\n**Important**: The repository must already exist in Artifact Registry. GitLab won't create a new one for you in this context.\n\n\nGitLab will generate commands to set up the necessary permissions. Run these in Google Cloud Shell.\n\n\nAdditionally, add these roles to your service principal for Cloud Run deployment:\n\n\n* `roles/run.admin`  \n\n* `roles/iam.serviceAccountUser`  \n\n* `roles/cloudbuild.builds.editor`\n\n\nYou can add these roles using the following gcloud commands:\n\n\n```shell\n\nGCP_PROJECT_ID=\"\u003Cyour-project-id>\" #replace\n\nGCP_PROJECT_NUMBER=\"\u003Cyour-project-number>\" #replace\n\nGCP_WORKLOAD_IDENTITY_POOL=\"\u003Cyour-pool-id>\" #replace\n\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/run.admin'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/iam.serviceAccountUser'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/cloudbuild.builds.editor'\n```\n\n\n**Step 3: Create the CI/CD pipeline**\n\n\nNow for the exciting part – let's build our deployment pipeline! 
GitLab's CI/CD components make this remarkably simple.\n\n\nCreate a `.gitlab-ci.yml` file in your project root:\n\n\n```unset\n\nstages:\n  - build\n  - test\n  - upload\n  - deploy\n\nvariables:\n  GITLAB_IMAGE: $CI_REGISTRY_IMAGE/main:$CI_COMMIT_SHORT_SHA\n  AR_IMAGE: $GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_LOCATION-docker.pkg.dev/$GOOGLE_ARTIFACT_REGISTRY_PROJECT_ID/$GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_NAME/main:$CI_COMMIT_SHORT_SHA\n\nbuild:\n  image: docker:24.0.5\n  stage: build\n  services:\n    - docker:24.0.5-dind\n  before_script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n  script:\n    - docker build -t $GITLAB_IMAGE .\n    - docker push $GITLAB_IMAGE\n\ninclude:\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Dependency-Scanning.gitlab-ci.yml\n  - template: Jobs/SAST.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/SAST.gitlab-ci.yml\n  - template: Jobs/Secret-Detection.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml\n  - component: gitlab.com/google-gitlab-components/artifact-registry/upload-artifact-registry@main\n    inputs:\n      stage: upload\n      source: $GITLAB_IMAGE\n      target: $AR_IMAGE\n  - component: gitlab.com/google-gitlab-components/cloud-run/deploy-cloud-run@main\n    inputs:\n      stage: deploy\n      project_id: \"\u003Cyour-project-id>\" #replace\n      service: \"canadian-city\"\n      region: \"us-central1\"\n      image: $AR_IMAGE\n```\n\n\nThe pipeline consists of four stages:\n\n\n1. **Build**: Creates the Docker container with your AI agent  \n\n2. **Test**: Runs security scans (container scanning, dependency scanning, SAST)  \n\n3. **Upload**: Pushes the container to Artifact Registry  \n\n4. 
**Deploy**: Deploys to Cloud Run\n\n\nThe great thing about using [GitLab's CI/CD components](https://docs.gitlab.com/ci/components/) is that you only need to provide a few parameters - the components handle all the complex authentication and deployment logic.\n\n\n**Step 4: Deploy and test**\n\n\nWith everything configured, it's time to deploy:\n\n\n1. Commit your code and `.gitlab-ci.yml` to your GitLab repository.  \n\n2. The pipeline will automatically trigger.  \n\n3. Monitor the pipeline progress in GitLab's CI/CD interface.  \n\n4. Once complete, find your Cloud Run URL in the Google Cloud Console.\n\n\nYou'll see each stage execute:\n\n\n* Build stage creates your container.  \n\n* Test stage runs comprehensive security scans.  \n\n* Upload stage pushes to Artifact Registry.  \n\n* Deploy stage creates or updates your Cloud Run service.\n\n\n## Security benefits\n\n\nThis approach provides several security advantages:\n\n\n* **No long-lived credentials:** Workload Identity Federation eliminates service account keys.  \n\n* **Automated security scanning:** Every deployment is scanned for vulnerabilities.  \n\n* **Audit trail:** Complete visibility of who deployed what and when.  \n\n* **Principle of least privilege:** Fine-grained IAM roles limit access.\n\n\n## Summary\n\nBy combining GitLab's security features with Google Cloud's powerful AI and serverless platforms, you can deploy AI agents that are both secure and scalable. The integration between GitLab and Google Cloud eliminates much of the complexity traditionally associated with such deployments.\n\n> Use this tutorial's [complete code\nexample](https://gitlab.com/gitlab-partners-public/google-cloud/demos/ai-agent-deployment)\nto get started now. Not a GitLab customer yet? 
Explore the DevSecOps platform with [a free trial](https://about.gitlab.com/free-trial/).\n",[9,1084,737],{"featured":6,"template":693,"slug":1516},"fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab","content:en-us:blog:fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab.yml","Fast And Secure Ai Agent Deployment To Google Cloud With Gitlab","en-us/blog/fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab.yml","en-us/blog/fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab",{"_path":1522,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1523,"content":1527,"config":1533,"_id":1535,"_type":14,"title":1536,"_source":16,"_file":1537,"_stem":1538,"_extension":19},"/en-us/blog/get-started-with-gitlab-duo-agentic-chat-in-the-web-ui",{"config":1524,"title":1525,"description":1526},{"noIndex":6},"Get started with GitLab Duo Agentic Chat in the web UI","Learn about our new GitLab Duo AI feature that automates tasks by breaking down complex problems and executing operations across multiple sources.",{"title":1525,"description":1526,"authors":1528,"heroImage":892,"date":1530,"body":1531,"category":10,"tags":1532},[711,1529],"Daniel Helfand","2025-08-11","In May 2025, GitLab launched an experimental feature called [GitLab Duo\nAgentic\nChat](https://about.gitlab.com/blog/gitlab-duo-chat-gets-agentic-ai-makeover/).\nThe goal of Agentic Chat was to build on the success of [GitLab Duo\nChat](https://docs.gitlab.com/user/gitlab_duo_chat/), which is an AI chat\nexperience built into supported IDEs and in the GitLab UI. While Chat\nprovides answers and suggestions for developers using the GitLab platform,\nAgentic Chat can more directly interact with the GitLab API on behalf of\nusers, taking actions on their behalf as a result of the conversation.\n\n\nIn addition to being available in a variety of IDEs, Agentic Chat is available directly within the GitLab UI for GitLab users with the Duo Pro or Enterprise add-on. 
Adding Agentic Chat to the GitLab UI helps make this experience more accessible to all GitLab users and easy to integrate into your workflows. To open Agentic Chat:\n\n\n1. Navigate to any Group or Project in your GitLab instance.  \n\n2. Look for the GitLab Duo Chat button (typically in the top right corner).  \n\n3. Click to open the chat panel.  \n\n4. Toggle to **Agentic mode (Beta)** in the chat window.\n\n\n**Pro tip:** Keep the chat panel open as you work — it maintains context and can help you across different pages and projects.\n\n\nTo get familiar with Agentic Chat, ask about the tools it can work with. This is like using the help command for a command-line tool.  \n\n\n```offset\nWhat tools do you have access to? \n```\n\n![GitLab Duo Agentic Chat screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754584200/emtgilbzbu8ftkynjozg.png)\n\nThe output above shows us that Agentic Chat has access to a variety of GitLab APIs and data that will allow it to perform complex tasks across the software development lifecycle.   \n\n## Issue management made easy\n\n\nGitLab Duo Agentic Chat can help you keep track of issues, find specific ones, understand the status, and take actions based on conversations in these issues. Instead of navigating through pages and pages of issues, you ask Agentic Chat about the issues in a project. It will respond with high-level information about the issues, including the priority, labels, and the status of the issue.\n\n\nFor a specific issue, Agentic Chat will fetch the issue details, provide a concise summary, highlight recent activity, and share the goal of the issue. This is particularly helpful when you need context or updates before a meeting or are researching the issue before picking it up. 
\n\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1107479358?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Agentic Chat UI Issue Management\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n\u003Cp>\u003C/p>\n\n\nYou can also try more complex queries if you're looking to better understand a project overall. And once you've discovered these issues, you can make changes to them like adding labels, updating milestones, and re-organizing them. \n\n\nFor example, maybe you're looking for all the issues that are database- or performance-related in order to prioritize them in the next sprint. You could task Agentic Chat with the following prompt.\n\n\n```offset\nAnalyze all issues labeled 'performance' and 'database' - group them by component and show me which ones have had the most discussion activity in the last 30 days.\n```\n\n\nAgentic Chat will respond with issues grouped by the backend and frontend component of a project, identify the issues with significant discussion activity, and provide insights on these kinds of issues (e.g., when were most of these issues created or which component issues have more active discussion).\n\n\n```offset\nCreate an issue template for bug reports that includes:\n\n- Steps to reproduce\n\n- Expected behavior vs actual behavior\n\n- Environment details (browser, OS, GitLab version)\n\n- Severity assessment\n\n- Screenshots/error logs section\n\nName it \"bug_report.md\" and format it as a proper GitLab issue template\n```\n\n\n## CI/CD support  \n\n\nThis is where GitLab Duo Agentic Chat truly becomes your debugging superhero. 
We've all been there: a pipeline fails and you have to click through job logs trying to understand what went wrong. Agentic Chat can do more than just explain the failure to you and suggest recommendations. After reviewing the failed pipeline logs, Agentic Chat can suggest a fix and also add the fix to a merge request you are working on.\n\n\nLet's say you have a merge request adding a new feature, but the pipeline is failing. Instead of clicking through each failed job and trying to piece together what's wrong, you can ask Agentic Chat to investigate.\n\n\nAgentic Chat will analyze the pipeline, check the job logs, and explain that the tests are failing because of missing test data or configuration issues. But here's where it gets even more powerful — you don't have to stop at understanding the problem. Agentic Chat can also act on the advice it presents and add commits to fix the pipeline in the merge request.\n\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1107495269?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab Agentic Chat CI/CD Fix\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n\n\n## Building complex prompts\n\n\nGitLab Duo Agentic Chat can also help you craft your prompts. 
Let's say that you're running a bug bash with your team and want to triage all possible issues that might be bug reports.\n\n\nIf you use a simple prompt like below, Agentic Chat will come up with ways to find the related issues, such as searching for terms or pattern matching:\n\n\n```offset\nI need help writing an effective prompt to find all possible bug report issues in my GitLab project, including those that might not be properly labeled as \"bug\".\n```\n\nOften you can use the recommendations in Agentic Chat to build a more in-depth prompt based on what you're looking for: \n\n```offset\nI need help writing an effective prompt to find all possible bug report issues in my GitLab project, including those that might not be properly labeled as \"bug\". Please help me create a prompt that will:\n\n\n1. Search for common bug-related terminology beyond just the word \"bug\"\n\n2. Identify patterns that indicate bug reports (like \"steps to reproduce\", \"expected vs actual behavior\")\n\n3. Find technical issues that might be bugs (errors, crashes, performance problems)\n\n4. Catch user-reported problems that could be bugs but use different language\n\n\nThe prompt should ensure we don't miss any potential bugs regardless of how they're described or labeled. What would be the most effective approach and search strategy for this?\n```\n\n\nOnce you have the prompt and you're able to search for the issues you're looking for, that's where Agentic Chat really shines. Agentic Chat can triage and update those issues for you to prepare them for the bug bash:\n\n\n```offset\nFind and triage all bug-related issues for our bug bash event. Execute these steps:\n\n\n1. 
Search for potential bugs using individual searches:\n   - Core terms: \"bug\", \"fix\", \"error\", \"broken\", \"issue\", \"problem\", \"not working\"\n   - Bug patterns: \"steps to reproduce\", \"expected behavior\", \"regression\"\n   - Technical issues: \"exception\", \"crash\", \"console error\", \"500 error\", \"404 error\"\n   - Performance: \"slow\", \"freezes\", \"unresponsive\"\n\n2. For each issue found:\n   - Add the \"Event - Bug Bash\" label\n   - Assign appropriate bug severity label (critical/high/medium/low)\n   - Add to the current bug bash milestone\n   - If missing \"bug\" label, add it\n\n3. Create a triage list organized by:\n   - Critical bugs (data loss, crashes, security)\n   - High priority (blocking features, frequent errors)\n   - Medium priority (workarounds available)\n   - Low priority (minor UI issues)\n\nSearch both open and closed issues. Focus on actionable bugs that can be fixed during the bug bash, excluding enhancement requests. Provide a summary table with issue numbers, titles, and assigned severity for the bug bash team.\n```\n\n\nYou can ask Agentic Chat to create a bug report template, which increases efficiency and eliminates some manual effort. Also, future bug reports will have the structure and labels you need for more efficient triaging.   \n\n## Tips for effective prompting \n\n\nWhen you're working with GitLab Duo Agentic Chat, it's important to phrase your requests with action-oriented verbs like \"create,\" \"update,\" \"fix,\" or \"assign.\" This will trigger the agentic tools to take action rather than summarize or share information with you. One approach before taking agentic actions can be to request summaries and analyses — the way we did with the issues about bugs. Then, see what gets returned before taking actions like applying a label or adding to a milestone. \n\n\nIt's also important to give clear criteria when asking for bulk operations. 
Specify exact conditions like \"all issues with the 'bug' label created in the last week\" or \"merge requests waiting for review for more than 3 days.\" The more specific you are, the more accurate and helpful the results will be.\n\n\nSince Agentic Chat has the ability to maintain context, you can chain requests and build on previous requests. After getting an initial set of issues, you might ask \"From those issues, which ones are unassigned?\" and then follow up with \"Assign the high-priority ones to the backend team.\" This allows you to refine and act on information iteratively. \n\n\nWe recommend starting with an open-ended request and allowing GitLab Duo to help you look for patterns or similar problems across your project. That will help you catch any problem that you may have missed or understand the scope of the challenge before taking action.   \n\n\n## Get hands-on with GitLab Duo Agentic Chat\n\n\nWe hope all the ideas above give you some thoughts on getting started with Agentic Chat, but we are even more excited to see all our users' ideas come to life with it. To try the Agentic Chat UI experience in your next project, sign up for a [free trial of GitLab Ultimate with Duo Enterprise](https://about.gitlab.com/free-trial/). 
You can learn more about GitLab Duo Agentic Chat on our [documentation page](https://docs.gitlab.com/user/gitlab_duo_chat/agentic_chat/), which also details how to enable Agentic Chat in the GitLab UI.\n",[9,109],{"featured":91,"template":693,"slug":1534},"get-started-with-gitlab-duo-agentic-chat-in-the-web-ui","content:en-us:blog:get-started-with-gitlab-duo-agentic-chat-in-the-web-ui.yml","Get Started With Gitlab Duo Agentic Chat In The Web Ui","en-us/blog/get-started-with-gitlab-duo-agentic-chat-in-the-web-ui.yml","en-us/blog/get-started-with-gitlab-duo-agentic-chat-in-the-web-ui",{"_path":1540,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1541,"content":1547,"config":1554,"_id":1556,"_type":14,"title":1557,"_source":16,"_file":1558,"_stem":1559,"_extension":19},"/en-us/blog/gitlab-16-ai-and-security-take-center-stage",{"title":1542,"description":1543,"ogTitle":1542,"ogDescription":1543,"noIndex":6,"ogImage":1544,"ogUrl":1545,"ogSiteName":706,"ogType":707,"canonicalUrls":1545,"schema":1546},"GitLab 16: AI and security take center stage","Our GitLab 16 launch event showcased our AI-powered workflows that drive usability improvements, security enhancements, and observability advancements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671793/Blog/Hero%20Images/16_0-cover-image.png","https://about.gitlab.com/blog/gitlab-16-ai-and-security-take-center-stage","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab 16: AI and security take center stage\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2023-06-30\",\n      }",{"title":1542,"description":1543,"authors":1548,"heroImage":1544,"date":1550,"body":1551,"category":10,"tags":1552},[1549],"David DeSanto, Chief Product Officer, GitLab","2023-06-30","\nThe new era of DevSecOps is here – and its focus is on improving 
everyone’s experience through AI-powered workflows that drive usability improvements, security enhancements, and observability advancements.\n\nAt our recent GitLab 16 launch event, we highlighted how our platform has evolved to fuel productivity and efficiency, which are top of mind in 2023, according to GitLab’s [2023 Global DevSecOps Report: Productivity & Efficiency Within Reach](https://about.gitlab.com/developer-survey/).\n\n> If you missed the GitLab 16 event, it’s [available on demand](https://about.gitlab.com/sixteen/), so it’s not too late.\n\nWhat I find most exciting about the launch of [GitLab 16](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/) is that it marks a significant milestone for our customers, as well as for GitLab, and heralds the era of AI-powered DevSecOps. We’ve built upon the nearly 500 new capabilities we introduced with GitLab 15 and have continued that upward trajectory. \n\nThe GitLab 16 event showcased amazing new capabilities across our entire DevSecOps platform, which is reinforced by our significant investments in critical areas: \n\n1. Building a world-class DevSecOps experience that includes significant usability improvements, additional collaboration capabilities, and [AI-assisted workflows](https://about.gitlab.com/blog/ai-ml-in-devsecops-series/). \n2. Providing advanced security and compliance, deepening our capabilities, and bringing [software supply chain security](https://about.gitlab.com/blog/the-ultimate-guide-to-software-supply-chain-security/) to the forefront of software development.\n3. Bringing observability, analytics, and feedback into our DevSecOps platform, [empowering organizations to close the SDLC loop](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html) with user data.\n4. 
Offering GitLab for data science workloads to enable data scientists and data engineers to benefit from all the value our DevSecOps Platform provides, including collaboration, reproducibility, and streamlined deployment into production.\n\n## Introducing GitLab Duo\nAs part of GitLab 16, we [unveiled GitLab Duo](https://about.gitlab.com/blog/meet-gitlab-duo-the-suite-of-ai-capabilities/), our suite of enterprise-grade AI capabilities powering DevSecOps workflows. GitLab Duo is integrated directly into the DevSecOps platform, enabling you to write better code faster and more efficiently. And GitLab Duo goes well beyond code creation to deliver AI-assisted workflows at all stages of the software development lifecycle, such as security testing and analysis, observability, and proactive vulnerability detection. Our goal is to help you achieve a 10x improvement in workflow efficiency by tapping into all of the DevSecOps platform’s AI capabilities. \n\nGitLab Duo includes Code Suggestions, Explain this Code, Explain this Vulnerability, Summarize Issue Comments, Chat, and more.\n\nLearn about the [powerful features of GitLab Duo](https://about.gitlab.com/gitlab-duo/).\n\n## Joining forces with Google Cloud\nAt the launch, June Yang, VP of Cloud AI and Industry Solutions at Google Cloud, discussed our partnership with [Google Cloud](https://about.gitlab.com/partners/technology-partners/google-cloud-platform/), (as recently [announced in May](https://about.gitlab.com/press/releases/2023-05-02-gitlab-and-google-cloud-partner-to-expand-ai-assisted-capabilities/)) in which we’re building several joint solutions that will help enterprise customers to improve the efficiency, effectiveness, and quality of their software development processes.\n\nBoth GitLab and Google Cloud are committed to developing new AI-powered solutions that help businesses improve their software development processes and protect their data.\n\nGitLab's vision for generative AI is grounded in privacy, 
security, and transparency. The partnership with Google Cloud enables GitLab to offer private and secure AI-powered features, while ensuring customer intellectual property (i.e., their source code) stays theirs and will not be used for training and fine-tuning of AI models. \n\n## CARFAX’s DevSecOps results\nMark Portofe, Director of Platform Engineering at CARFAX, also joined us for our GitLab 16 launch. It was enlightening to hear how CARFAX has been using GitLab since 2017 to make them more productive and more secure. Mark shared how CARFAX can now [create CI/CD pipelines](https://about.gitlab.com/solutions/continuous-integration/) in hours instead of the days or even weeks it took before – freeing up their developers’ time to focus on getting code to production. As a result, their number of production deployments has increased by 20% year over year.\n\n## GitLab Dedicated\nAs part of our GitLab 16 event, we also shared that [GitLab Dedicated](https://about.gitlab.com/dedicated/), our single-tenant SaaS offering of GitLab’s DevSecOps platform designed to address the needs of customers with stringent compliance requirements, is now generally available. \n\nWith GitLab Dedicated, organizations can access all of the benefits of the DevSecOps platform delivered as a SaaS offering – including faster releases, better security, and more productive developers – while satisfying compliance requirements such as data residency, isolation, and private networking.\n\n## Value Stream Analytics and Dashboards\nWhen it comes to observability, analytics, and feedback, our single application shines by providing end-to-end metrics and insights. We see this being very much native to our DevSecOps platform.\n\nWe’ve made great strides with [Value Streams Dashboards](https://www.youtube.com/watch?v=EA9Sbks27g4), a popular feature with our customers. 
These dashboards combine DORA 4 metrics with GitLab-specific metrics to give organizations insights into the health of their software delivery, identifying areas of efficiency and areas for improvement. \n\nWe are also introducing [Product Analytics](https://about.gitlab.com/blog/introducing-product-analytics-in-gitlab/) in GitLab 16 to close the DevSecOps loop with user metrics and feedback from the applications that organizations are building with GitLab, which they can incorporate into their planning efforts. \n\n## Watch the GitLab 16 launch event\nI’m really proud of the work our teams put into GitLab 16 to make it a reality. To hear more and dig deeper into the amazing capabilities of GitLab 16, check out the [launch event](https://about.gitlab.com/sixteen/).\n",[9,496,1553,691],"releases",{"slug":1555,"featured":6,"template":693},"gitlab-16-ai-and-security-take-center-stage","content:en-us:blog:gitlab-16-ai-and-security-take-center-stage.yml","Gitlab 16 Ai And Security Take Center Stage","en-us/blog/gitlab-16-ai-and-security-take-center-stage.yml","en-us/blog/gitlab-16-ai-and-security-take-center-stage",{"_path":1561,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1562,"content":1566,"config":1573,"_id":1575,"_type":14,"title":1576,"_source":16,"_file":1577,"_stem":1578,"_extension":19},"/en-us/blog/gitlab-18-3-expanding-ai-orchestration-in-software-engineering",{"config":1563,"title":1564,"description":1565},{"noIndex":6},"GitLab 18.3: Expanding AI orchestration in software engineering","Learn how we're advancing human-AI collaboration with enhanced Flows, enterprise governance, and seamless tool integration.",{"title":1564,"description":1565,"authors":1567,"heroImage":1569,"date":1570,"body":1571,"category":10,"tags":1572},[1568],"Bill Staples","https://res.cloudinary.com/about-gitlab-com/image/upload/v1755711502/wuuadis1pza3zehqohcc.png","2025-08-21","Today, GitLab is a comprehensive DevSecOps platform, unifying every stage of the software lifecycle. 
Building on that foundation, we're on a journey toward becoming the world's first AI-native platform for software engineering. At GitLab, we believe the future of software engineering is an inherently human and AI collaboration, and we want to bring the very best AI capabilities to every GitLab user.  \n\nThis transformation is happening at three distinct layers that go beyond what other AI dev tools are doing:\n\n![AI-native transformation slide visualizing what's laid out below](https://res.cloudinary.com/about-gitlab-com/image/upload/v1755762266/iwuugge3cxweiyvi0yjk.png)\n\n**First, we are a system of record.** Our unified data platform holds your most valuable digital assets. This includes your source code and intellectual property, as well as a wealth of unstructured data spanning project plans, bug backlogs, CI/CD configurations, deployment histories, security reports, and compliance data. This creates a treasure trove of contextual data that remains securely within your GitLab environment, unavailable to generic agents or large language models.\n\n**Second, we act as your software control plane.** We orchestrate your most critical business processes through Git repositories, REST APIs, and webhook-based interfaces that power your end-to-end software delivery. Many of our customers consider this a tier-0 dependency that their critical business processes rely on daily.\n\n**Third, we deliver a powerful user experience.** We deliver an integrated interface that helps eliminate the costly context-switching that slows down most engineering teams. With complete lifecycle visibility and collaboration tools in one platform, over 50 million registered users and our vast community depend on GitLab to get their work done. 
This expertise positions GitLab uniquely to pioneer intuitive human-to-AI collaboration that amplifies team productivity while preserving the workflows that our users know and trust.\n\n**Extending our platform with AI natively integrated at every layer**\n\n[GitLab Duo Agent Platform](https://about.gitlab.com/gitlab-duo/agent-platform/) integrates and extends all three of these layers. It is designed for extensibility and interoperability, enabling customers and partners to build solutions that create even more value. Our open platform approach emphasizes seamless connectivity with external AI tools and systems while being deeply integrated into our existing stack at all three layers.\n\n* First, we're extending our unified data platform with a **Knowledge Graph,** which indexes and stitches together code with all of the rest of your unstructured data, specifically optimized for agentic access. AI thrives on context, and we believe this will not only accelerate reasoning and inference by agents but also deliver lower-cost and higher-quality agentic outcomes.\n* Second, we're adding an important **Orchestration Layer** to our existing Control Plane in three distinct parts: enabling agents and flows to register as subscribers for GitLab SDLC events, building a new orchestration engine that allows for purpose-built, multi-agent flows, and exposing GitLab tools, agents, and flows via MCP and standard protocols for unparalleled interoperability.\n* Finally, we're extending the **GitLab experience** to deliver first-class agents and agent flows across the entire software development lifecycle. You'll be able to assign async tasks to agents, @ mention them in comments, and create custom agents with context specific to your workflows — but more importantly, GitLab is shipping native agents for every stage of development while unlocking a rich ecosystem of third-party agents. 
This creates true human-to-AI collaboration where agents become as natural to work with as your human teammates.\n\nWatch this video to see what's coming in 18.3 and beyond, or read on.\n\n\u003Cdiv style=\"padding:75% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1111796316?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab_18.3 Release_081925_MP_v1\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## What's new in GitLab 18.3\n\nWith 18.2, we introduced specialized [AI agents](https://about.gitlab.com/blog/gitlab-duo-agent-platform-public-beta/#agents-that-work-out-of-the-box:~:text=Agents%20that%20work%20out%20of%20the%20box) that work alongside developers across the software development lifecycle, plus our [Software Development Flow](\u003Chttps://about.gitlab.com/blog/gitlab-duo-agent-platform-public-beta/#agents-that-work-out-of-the-box:~:text=we%20are%20building%3A-,Software%20Development%20Flow,-(now%20in%20beta>) — a powerful feature that gives users the ability to orchestrate multiple agents to plan, implement, and test code changes end-to-end.\n\nGitLab 18.3 introduces expanded integrations and interoperability, more Flows, and enhanced context awareness across the entire software development lifecycle.\n\n### Expanded integrations and interoperability\n\nWe're delivering comprehensive AI extensibility through both first-party GitLab agents and a rich ecosystem of third-party agents, all with full access to project context and data. 
This approach maintains native GitLab workflows and governance while providing the flexibility to choose preferred tools through highly integrated orchestration between these agents and GitLab's core platform. Teams gain enhanced AI functionality while preserving key integration, oversight, and user experience benefits.\n\n* **MCP server - Universal AI integration:** GitLab's MCP ([Model Context Protocol](https://about.gitlab.com/topics/ai/model-context-protocol/)) server enables AI systems to securely integrate directly with your GitLab projects and development processes. This standardized interface eliminates custom integration overhead and allows your AI tools — including [Cursor](https://docs.cursor.com/en/tools/mcp) — to work intelligently within your existing GitLab environment. See our [docs](https://docs.gitlab.com/user/gitlab_duo/model_context_protocol/mcp_server/) for a full list of tools included with 18.3. **This is only the start; additional tools are planned for 18.4.**\n> *“Bringing GitLab workflows directly into Cursor is a critical step in reducing friction for developers. By minimizing the need for context switching, teams can check issue status, review merge requests, and monitor pipeline results without ever leaving their coding environment. This integration is a natural fit for our shared customers, and we look forward to a long-term partnership with GitLab to continue enhancing developer productivity.”*\n>\n> \\- **Ricky Doar, VP of Field Engineering at Cursor**\n>\n> *“GitLab's MCP server and CLI agent support create powerful new ways for Amazon Q to integrate with development workflows. Amazon Q Developer can now connect directly through GitLab's remote MCP interface, while teams can delegate development tasks by simply @ mentioning Amazon Q CLI in issues and merge requests. 
The robust security and governance capabilities built into these integrations give enterprises the confidence to leverage AI coding tools while preserving their development standards. Our partnership with GitLab demonstrates AWS' ongoing commitment to expanding our AI ecosystem and making intelligent development tools accessible wherever developers work.\"*\n>\n> \\- **Deepak Singh, Vice President of Developer Agents and Experiences at AWS**\n\n* **CLI agent support for Claude Code, Codex, Amazon Q, Google Gemini, and opencode (Bring Your Own Key):** 18.3 introduces integrations that enable teams to delegate routine development work by @ mentioning their agents directly in issues or merge requests. When developers mention these AI assistants, they automatically read the surrounding context and repository code, then respond to the user's comment with either ready-to-review code changes or inline comments. These integrations require you to bring your own API key for the respective AI providers and keep all interactions natively within GitLab's interface while maintaining proper permissions and audit trails.\n\n\n  **Note:** Third-party agents is a GitLab Premium Beta feature and only available to GitLab Duo Enterprise customers for evaluation.  \n\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1111784124?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Third Party Agents Flows Claude Code\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n> *“Bringing Claude Code directly into GitLab puts AI assistance where millions of developers already collaborate and ship code daily. 
The ability to mention Claude directly in issues and merge requests removes friction while maintaining quality with human oversight and review processes. This update brings Claude Code's capabilities to more places where teams work, making AI a natural part of their developer workflow.”*\n>\n> **\\- Cat Wu, Claude Code Product Lead, Anthropic**\n>\n> *“With GitLab's new agent integration in 18.3 you can use opencode within your existing workflows. You can @mention opencode in an issue or merge request and it'll run your agent right in your CI pipeline. This ability to configure and run opencode the way you want is the type of integration we know the open source community really values.”*\n>\n> **\\- Jay V., CEO, opencode**\n\n* **Agentic Chat support for Visual Studio IDE and GitLab UI available to all Premium and Ultimate customers:** With 18.3, you no longer need to context-switch between tools to access GitLab's full development lifecycle data. Our enhanced integrations bring the complete power of GitLab Duo into the GitLab UI as well as IDEs — expanding support from JetBrains and VS Code to now include Visual Studio. This helps developers stay in flow while accessing rich project context, deployment history, and team collaboration data directly within their preferred environment.\n* **Expanded AI model support:** GitLab Duo Self-Hosted now supports additional AI models, giving teams more flexibility in their AI-supported development workflows. You can now deploy open source OpenAI GPT models (20B and 120B parameters) through vLLM on your datacenter hardware, or through cloud services like Azure OpenAI and AWS Bedrock in your private cloud. 
Additionally, Anthropic's Claude 4 is available on AWS Bedrock\n\n### New automated development flows\n\nGitLab Flows coordinate multiple AI agents with pre-built instructions to autonomously handle those time-consuming, mundane tasks so developers can focus on the work that matters most.\n\nGitLab 18.3 comes with two new Flows:\n\n* **Issue to MR Flow enabling automated code generation from concept to completion in minutes:** This Flow automatically converts issues into actionable merge requests (MRs) by coordinating agents to analyze requirements, prepare comprehensive implementation plans, and generate production-grade code that's ready for review — helping you turn ideas into reviewable implementations in minutes, not hours.\n\n\u003Cdiv style=\"padding:75% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1111782058?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Issue to MR\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n* **Convert CI File Flow built for seamless migration intelligence:** Our Convert CI File Flow streamlines migration workflows by having agents analyze existing CI/CD configurations and intelligently convert them to GitLab CI format with full pipeline compatibility. This helps eliminate the manual effort and potential errors of rewriting CI configurations from scratch, enabling teams to migrate entire deployment pipelines with confidence. 18.3 includes support for Jenkins migrations. 
Additional support is planned for future releases.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1111783724?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Convert to CI Flow\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### Intelligent code and search\n\nAI point solutions typically operate with limited visibility into isolated code snippets, but GitLab's Knowledge Graph provides agents with environment context to help inform faster and more intelligent responses.\n\n* **Knowledge Graph for real-time code intelligence:** With 18.3, GitLab's Knowledge Graph now delivers real-time code indexing to enable faster code searches, delivering more accurate and contextual results. 
By understanding the relationships between files, dependencies, and development patterns across your entire codebase, our agents are designed to provide insights that would take human developers hours to uncover — **and this is just the first step in unlocking the powerful capabilities that are planned for Knowledge Graph.**\n\n### Enterprise governance\n\nAI transparency and organizational control are critical challenges that can hold teams back from fully adopting AI-powered development tools, with [85% of executives agreeing that agentic AI will create unprecedented security challenges](https://about.gitlab.com/software-innovation-report/).\n\nThese new features in 18.3 help address concerns around data governance, compliance requirements, and the need for visibility into AI decision-making processes so organizations can integrate AI within their existing security and policy frameworks.\n\n* **Agent Insights for transparency through intelligence:** Our built-in agent tracking provides visibility into agent decision-making processes. 
Users can optimize workflows and follow best practices through transparent activity tracking.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1111783244?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Agent Insights\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\n* **GitLab Duo Code Review for Self-Hosted:** This brings the intelligence of GitLab Duo to organizations with strict data governance requirements by allowing teams to keep sensitive code in controlled environments.\n* **Hybrid model configurations for flexible AI deployment:** GitLab Duo Self-Hosted customers can now use hybrid model configurations, combining self-hosted AI models via their local AI gateway with GitLab's cloud models through GitLab's AI gateway, enabling access to various features.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1111783569?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Self Hosted Models Code Review\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\n* **Enhanced security with OAuth support:** Our MCP server now includes full OAuth 2.0 authentication support, enabling secure connections to protected resources and sensitive development environments. 
This implementation follows the draft OAuth specification for MCP, handling authorization flows, token management, and dynamic client registration. \n\n### Secure by Design platform: Governance that scales\n\nTrue platform security requires consistent application of governance principles across every layer of the development lifecycle. The same security fundamentals that make AI adoption safe — least-privilege access, centralized policy management, proactive monitoring, and granular permissions — must be embedded throughout the entire SDLC to create a cohesive, defense-in-depth approach.\n\nGitLab 18.3 strengthens the foundational controls that help protect your entire software supply chain with these new updates:\n\n* **Custom admin role:** Provides granular, purpose-built administrative permissions, replacing blanket admin access with precise, least-privilege controls. Instead of granting blanket administrative privileges that create security risks, organizations can now create specialized roles tailored to specific functions — platform teams managing runners and monitoring, support teams handling user management, and leadership accessing dashboards and usage statistics. With complete role lifecycle management through UI and API, audit logging, and auto-generated documentation, this feature enables true least-privilege administration while helping maintain operational efficiency and improve overall instance security.\n* **Instance-level compliance framework and security policy management**: Organizations can now designate a dedicated compliance group that has the authority to apply standardized frameworks and security policies directly to top-level groups, automatically cascading enforcement to all their subgroups and projects. 
This centralized approach eliminates the compliance adoption blocker of fragmented policy management while maintaining group autonomy for additional local policies.\n* **Enhanced violations reporting:** Teams now receive immediate notifications when unauthorized changes are made to MR approval rules, framework policies lack proper approvals, or time-based compliance controls are violated. By directly linking violations to specific compliance framework controls, teams get actionable insights that tell them exactly which requirement was breached, turning compliance from a reactive checkbox exercise into a proactive, integrated part of the development and security workflow.\n* **Fine-grained permissions for CI/CD job tokens:** Replaces broad token access with granular, explicit permissions that grant CI/CD jobs access only to specific API endpoints they actually need. Instead of allowing jobs blanket access to project resources, teams can now define precise permissions for deployments, packages, releases, environments, and other critical resources, reducing the attack surface and potential for privilege escalation.\n* **AWS Secrets Manager integration:** Teams using AWS Secrets Manager can now retrieve secrets directly in GitLab CI/CD jobs, simplifying the build and deploy processes. Secrets are accessed by a GitLab Runner using OpenID Connect protocol-based authentication, masked to prevent exposure in job logs, and destroyed after use. This approach eliminates the need to store secrets in variables and integrates cleanly into existing GitLab and AWS-based workflows. Developed in close collaboration with Deutsche Bahn and the AWS Secrets Manager team, this integration reflects our commitment to building solutions alongside customers to solve real-world challenges.\n\n### Artifact management: Securing your software supply chain\n\nWhen artifacts aren't properly governed, small changes can have big consequences. 
Mutable packages, overwritten container images, and inconsistent rules across tools can trigger production outages, introduce vulnerabilities, and create compliance gaps. For enterprise DevSecOps, secure, centralized artifact management is essential for keeping the software supply chain intact.\n\n#### Enterprise-grade artifact protection in 18.3\n\nBuilding on our comprehensive package protection capabilities, GitLab 18.3 adds important new features:\n\n* **Conan revisions support:** New in 18.3, [Conan revisions](https://docs.gitlab.com/user/packages/conan_2_repository/#conan-revisions) provide package immutability for C++ developers. When changes are made to a package without changing its version, Conan calculates unique identifiers to track these changes, enabling teams to maintain immutable packages while preserving version clarity.\n* **Enhanced Container Registry security:** Following the successful launch of [immutable container tags](https://docs.gitlab.com/user/packages/container_registry/immutable_container_tags/) in 18.2, we're seeing strong enterprise adoption. 
Once a tag is created that matches an immutable rule, no one — regardless of permission level — can modify that container image, preventing unintended changes to production dependencies.\n\nThese enhancements complement our existing protection capabilities for npm, PyPI, Maven, NuGet, Helm charts, and generic packages, enabling platform teams to implement consistent governance across their entire software supply chain — a requirement for organizations building secure internal developer platforms.\n\nUnlike standalone artifact solutions, GitLab's integrated approach eliminates context switching between tools while providing end-to-end traceability from code to deployment, enabling platform teams to implement consistent governance across their entire software delivery pipeline.\n\n### Embedded views: Real-time visibility and reports\n\nAs GitLab projects grow in complexity, teams find themselves navigating between issues, merge requests, epics, and milestones to maintain visibility into work status. 
The challenge lies in consolidating this information efficiently while ensuring teams have real-time access to project progress without context switching or breaking their flow.\n**Launching real-time work status visibility in 18.3**\nGitLab 18.3's [embedded views, powered by our powerful GitLab Query Language](https://docs.gitlab.com/user/glql/#embedded-views) (GLQL), eliminate context switching by bringing live project data directly into your workflow:\n* **Dynamic views:** Insert live GLQL queries in Markdown code blocks throughout wiki pages, epics, issues, and merge requests that automatically refresh with current project states each time you load the page.\n* **Contextual personalization:** Views automatically adapt using functions like `currentUser()` and `today()` to show relevant information for whoever is viewing, without manual configuration.\n* **Powerful filtering:** Filter by 25+ fields, including assignee, author, label, milestone, health status, and creation date.\n* **Display flexibility:** Present data as tables, lists, or numbered lists with customizable field selection, item limits, and sort orders to keep your views focused and actionable\n\nUnlike fragmented project management approaches, we've designed embedded views to maintain your workflow continuity while providing real-time visibility, enabling teams to make informed decisions without losing focus or switching between multiple tools and interfaces.\n\n> Learn about the [newest features in GitLab 18.3](https://about.gitlab.com/releases/2025/08/21/gitlab-18-3-released/).\n## Get started today\nGitLab 18.3 is available now for GitLab Premium and Ultimate users on GitLab.com and self-managed environments.\n\nGitLab Dedicated customers are now upgraded to 18.2 and will be able to use the features released with GitLab 18.3 next month.\n\nReady to experience the future of software engineering?[ Enable beta and experimental features for GitLab 
Duo](https://docs.gitlab.com/user/gitlab_duo/turn_on_off/#turn-on-beta-and-experimental-features) and start collaborating with AI agents that understand your complete development context.\n\nNew to GitLab? [Start your free trial](https://gitlab.com/-/trials/new) today and discover why the future of software engineering is human and AI collaboration, orchestrated through the world's most comprehensive DevSecOps platform.\n\n\u003Cp>\u003Csmall>\u003Cem>This blog post contains “forward-looking statements” within the meaning of Section 27A of the Securities Act of 1933, as amended, and Section 21E of the Securities Exchange Act of 1934. Although we believe that the expectations reflected in the forward-looking statements contained in this blog post are reasonable, they are subject to known and unknown risks, uncertainties, assumptions and other factors that may cause actual results or outcomes to be materially different from any future results or outcomes expressed or implied by the forward-looking statements.\u003C/em>\u003C/p>\n\u003Cp>\u003Cem>Further information on risks, uncertainties, and other factors that could cause actual outcomes and results to differ materially from those included in or contemplated by the forward-looking statements contained in this blog post are included under the caption “Risk Factors” and elsewhere in the filings and reports we make with the Securities and Exchange Commission. 
We do not undertake any obligation to update or release any revisions to any forward-looking statement or to report any events or circumstances after the date of this blog post or to reflect the occurrence of unanticipated events, except as required by law.\u003C/em>\u003C/small>\u003C/p>",[738,9,496,717,691],{"featured":91,"template":693,"slug":1574},"gitlab-18-3-expanding-ai-orchestration-in-software-engineering","content:en-us:blog:gitlab-18-3-expanding-ai-orchestration-in-software-engineering.yml","Gitlab 18 3 Expanding Ai Orchestration In Software Engineering","en-us/blog/gitlab-18-3-expanding-ai-orchestration-in-software-engineering.yml","en-us/blog/gitlab-18-3-expanding-ai-orchestration-in-software-engineering",{"_path":1580,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1581,"content":1585,"config":1592,"_id":1594,"_type":14,"title":1595,"_source":16,"_file":1596,"_stem":1597,"_extension":19},"/en-us/blog/gitlab-achieves-iso-iec-42001-certification-for-ai-governance",{"config":1582,"title":1583,"description":1584},{"noIndex":6},"GitLab achieves ISO/IEC 42001 certification for AI governance","Learn about this new ISO certification, our associated GitLab Duo features, and our commitment to responsible AI development.",{"title":1583,"description":1584,"authors":1586,"heroImage":1588,"date":1589,"body":1590,"category":10,"tags":1591},[1587],"Davoud Tu","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098208/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_479904468%20%281%29_4lmOEVlaXP0YC3hSFmOw6i_1750098208185.jpg","2025-09-02","Artificial intelligence (AI) is transforming how we work and solve problems across every industry. As AI becomes more integrated into business processes and decision-making, the need for robust AI governance frameworks has never been more critical. 
Organizations must balance the potential opportunity of AI with ensuring AI systems are built safely, ethically, and with accountability.\n\nAs part of our commitment to responsible AI management, we're excited to announce that GitLab has achieved the ISO/IEC 42001 certification, the first internationally recognized standard for establishing, implementing, maintaining, and continually improving an Artificial Intelligence Management System (AIMS) within organizations. \n\nThe scope of the certification includes our comprehensive AI offering, GitLab Duo, as well as GitLab Duo Agent Platform and its components. As a leader in DevSecOps, GitLab provides AI-powered features across the development lifecycle, including capabilities such as:\n\n* [GitLab Duo Agent Platform](https://about.gitlab.com/blog/gitlab-duo-agent-platform-public-beta/) (now in public beta, general availability planned for later this year): Enables asynchronous collaboration between developers and specialized AI agents throughout the software development lifecycle, helping transform linear development processes into dynamic, parallel workflows while providing agents with access to all of the software engineering context stored within GitLab's unified platform.\n\n* [Code Suggestions](https://docs.gitlab.com/user/project/repository/code_suggestions/) (generally available): Allows developers to stay in flow by predictively completing code blocks, defining function logic, generating tests, and proposing common code like regex patterns, all in the same environment where they already code.\n\n* [Vulnerability Explanation](https://docs.gitlab.com/user/application_security/vulnerabilities/#explaining-a-vulnerability) (generally available): Helps developers and security analysts understand vulnerabilities, how they might be exploited, and how to fix them.\n\n* [Test Generation](https://docs.gitlab.com/user/gitlab_duo/) (generally available): Creates tests automatically for selected code, improving coverage 
and reducing manual effort.\n\n## What this certification means for GitLab users\n\n**Enhanced trust and transparency:** Our AI features are built and managed according to globally recognized best standards for AI governance, supporting reliability and ethical implementation.\n\n**Strategic risk management:** We've implemented risk assessment and risk treatment strategies for AI components within our platform, considering aspects such as operational business continuity risks, technical risks, security and privacy risks, and broader societal implications. This proactive approach enhances customer data protection and facilitates more reliable AI-powered features.\n\n**Continuous improvement:** Under the ISO/IEC 42001 framework, we will work to continuously evaluate and enhance our AI capabilities through annual external surveillance audits, regular internal assessments, and leadership AIMS review while maintaining standards of quality and responsibility.\n\n**Regulatory alignment:** As AI regulations continue to evolve globally, like the EU AI Act, this certification supports GitLab's alignment with emerging regulatory requirements.\n\nThis achievement validates GitLab's position as the trusted platform for AI-powered DevSecOps, and we are excited to continue leading the way in responsible AI innovation.\n\n## Learn more\n\n- View the ISO/IEC 42001 certificate at the [GitLab Trust Center](https://trust.gitlab.com/).\n- Read about our [AIMS in our handbook](https://handbook.gitlab.com/handbook/security/isms/).\n- Check out the [GitLab AI Transparency Center](https://about.gitlab.com/ai-transparency-center/).\n- Explore all [GitLab Duo features and capabilities](https://docs.gitlab.com/user/gitlab_duo/) in our documentation.",[1181,9,738],{"featured":91,"template":693,"slug":1593},"gitlab-achieves-iso-iec-42001-certification-for-ai-governance","content:en-us:blog:gitlab-achieves-iso-iec-42001-certification-for-ai-governance.yml","Gitlab Achieves Iso Iec 42001 
Certification For Ai Governance","en-us/blog/gitlab-achieves-iso-iec-42001-certification-for-ai-governance.yml","en-us/blog/gitlab-achieves-iso-iec-42001-certification-for-ai-governance",{"_path":1599,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1600,"content":1606,"config":1612,"_id":1614,"_type":14,"title":1615,"_source":16,"_file":1616,"_stem":1617,"_extension":19},"/en-us/blog/gitlab-ai-assisted-features",{"title":1601,"description":1602,"ogTitle":1601,"ogDescription":1602,"noIndex":6,"ogImage":1603,"ogUrl":1604,"ogSiteName":706,"ogType":707,"canonicalUrls":1604,"schema":1605},"GitLab details AI-assisted features in the DevSecOps platform","In a fireside chat, CEO and co-founder Sid Sijbrandij shared demos of AI-assisted features available today in gitlab.com.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669077/Blog/Hero%20Images/ai-fireside-chat.png","https://about.gitlab.com/blog/gitlab-ai-assisted-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab details AI-assisted features in the DevSecOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2023-05-03\",\n      }",{"title":1601,"description":1602,"authors":1607,"heroImage":1603,"date":1609,"body":1610,"category":10,"tags":1611},[1608],"Sid Sijbrandij","2023-05-03","\nThis morning, GitLab’s Chief Financial Officer Brian Robins and I led a fireside chat focused on [GitLab’s AI strategy](https://ir.gitlab.com/news-releases/news-release-details/gitlab-hold-ai-fireside-chat-sid-sijbrandij), AI’s role in solving customer pain points, and our AI product roadmap.\n\nAI marks a big industry shift that will make it easier to develop, secure, and operate software. 
We plan to infuse AI throughout the software development lifecycle by incorporating it into our comprehensive enterprise DevSecOps platform.\n\nWe will lead with a customer-centric approach focused on privacy first, where customers know their intellectual property is secured. One way we are accomplishing this is with [our recently announced generative AI partnership with Google](https://about.gitlab.com/press/releases/2023-05-02-gitLab-and-google-cloud-partner-to-expand-ai-assisted-capabilities.html). This will allow GitLab to use Google's generative AI foundation models to provide customers with AI-powered offerings within our cloud infrastructure. We’ll maintain our commitment to protecting user privacy by containing customer intellectual property and source code within GitLab's cloud infrastructure.\n\nWatch the AI fireside chat:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/ejWeMdVz8Nk\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen>\u003C/iframe>\n\n\u003C/figure>\n\u003C!-- blank line -->\n\nDuring the fireside chat, we introduced AI-assisted features available to GitLab customers today on gitlab.com. We provided a live demo of these capabilities that can be utilized by everyone throughout the software development lifecycle. \n\n![List of AI-assisted capabilities](https://about.gitlab.com/images/blogimages/ai-assisted-capabilities-detailed.png){: .shadow}\n\nWe also discussed how these capabilities are focused on three personas: development, security and operations teams, and have features available for all users. 
Watch the demos for these capabilities available on gitlab.com today:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/ILJeqWoVswM\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## AI for Developer Teams \n\n### Code Suggestions\n- Enables developers to write code more efficiently by viewing code suggestions as they type. \nLearn more about [Code Suggestions](/blog/ai-assisted-code-suggestions/).\n\n### Suggested Reviewers\n- Helps customers receive faster and higher quality reviews by automatically finding the right people to review a merge request.\nLearn more about [Suggested Reviewers](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/data_usage.html).\n\n### Summarize MR Changes\n- Helps merge request authors to drive alignment and action by efficiently communicating the impact of their changes.\nLearn more about [Summarize MR Changes](/blog/merge-request-changes-summary-ai/).\n\n### Summarize My MR Review\n- Enables better handoffs between authors and reviewers and helps reviewers efficiently understand many merge request suggestions. 
\nLearn more about [Summarize My MR Review](/blog/summarize-my-merge-request-review/).\n\n## AI for Security and Operations\n\n### Explain This Vulnerability\n- Helps developers remediate vulnerabilities more efficiently and uplevel their skills, enabling them to write more secure code.\nLearn more about [Explain This Vulnerability](/blog/explain-this-vulnerability/).\n\n### Generate Tests in MRs\n- Automates repetitive tasks for developers and helps them catch bugs early.\nLearn more about [Generate Tests in MRs](/blog/merge-request-suggest-a-test/).\n\n### Explain This Code\n- Allows DevSecOps teams to get up to speed quickly on code.\nLearn more about [Explain This Code](/blog/explain-this-code/).\n\n## AI for everyone\n\n### Issue Comment Summaries\n- Quickly gets everyone up to speed on lengthy conversations to ensure they are all on the same page.\nLearn more about [Issue Comment Summaries](/blog/summarize-issues/).\n\n### GitLab Chat\n- Helps quickly identify useful information in large volumes like documentation.\nLearn more about [GitLab Chat](https://gitlab.com/groups/gitlab-org/-/epics/10220).\n\n### Value Stream Forecasting\n- Predicts productivity metrics and identifies anomalies across your software development lifecycle.\nLearn more about [Value Stream Analytics](https://docs.gitlab.com/ee/user/group/value_stream_analytics/).\n\nThese are just the beginning of many features we have in the works leveraging generative AI to provide our customers [AI-assisted features](/topics/devops/the-role-of-ai-in-devops/) across our DevSecOps platform. With our value of iteration at the heart of our work, we are actively improving all the capabilities we announced today as well as introducing new capabilities. [AI is in all we do](/company/yearlies/#fy24-yearlies) and we intend to ship many capabilities throughout the year as they become ready.  \n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. 
[Watch today!](https://about.gitlab.com/seventeen/)",[1181,717,9,716],{"slug":1613,"featured":6,"template":693},"gitlab-ai-assisted-features","content:en-us:blog:gitlab-ai-assisted-features.yml","Gitlab Ai Assisted Features","en-us/blog/gitlab-ai-assisted-features.yml","en-us/blog/gitlab-ai-assisted-features",{"_path":1619,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1620,"content":1626,"config":1633,"_id":1635,"_type":14,"title":1636,"_source":16,"_file":1637,"_stem":1638,"_extension":19},"/en-us/blog/gitlab-ai-cicd-customization-toolkit",{"title":1621,"description":1622,"ogTitle":1621,"ogDescription":1622,"noIndex":6,"ogImage":1623,"ogUrl":1624,"ogSiteName":706,"ogType":707,"canonicalUrls":1624,"schema":1625},"GitLab AI, CI/CD and customization for secure scaled growth","Find out how the latest developments for the GitLab AI-powered DevSecOps Platform help organizations scale to enterprise levels.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679194/Blog/Hero%20Images/duo-blog-post.png","https://about.gitlab.com/blog/gitlab-ai-cicd-customization-toolkit","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Drive secure growth at scale: Your GitLab AI, CI/CD, and customization toolkit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mike Flouton\"}],\n        \"datePublished\": \"2023-10-31\",\n      }",{"title":1627,"description":1622,"authors":1628,"heroImage":1623,"date":1630,"body":1631,"category":10,"tags":1632},"Drive secure growth at scale: Your GitLab AI, CI/CD, and customization toolkit",[1629],"Mike Flouton","2023-10-31","\nScaling up to enterprise-level intensifies the demand for rapid, secure software delivery. Large organizations can easily fall into the trap of single-function silos, making collaboration tricky and slowing development. 
Over the past few months, we've introduced new capabilities for the GitLab AI-powered DevSecOps Platform to help teams address these hurdles, accelerate innovation, ensure compliance, and fortify their digital defenses.\n- [AI capabilities that reshape speed and security](#ai-capabilities-that-reshape-speed-and-security)\n- [A single, enterprise-ready DevSecOps platform](#a-single-enterprise-ready-devsecops-platform) \n- [A customizable solution that fits the way you work](#a-customizable-solution-that-fits-the-way-you-work)\n\nLet’s take a closer look at what we've been working on and how these advancements benefit growing organizations.\n\n> Bring the best practices of industry leaders to your team. Join GitLab and Nasdaq for an exciting discussion about AI, DevSecOps, and developer productivity. [Register for this webinar today!](https://page.gitlab.com/webcast-fy24q3-devsecops-ai-developer-productivity.html)\n\n## AI capabilities that reshape speed and security\nAI will transform the way organizations develop software. Our [State of AI in Software Development](https://about.gitlab.com/developer-survey/#ai) report, released earlier this year, demonstrates this: 83% of DevSecOps professionals surveyed said implementing AI in their software development processes is essential to avoid falling behind competitors. \n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/) is a powerful set of AI capabilities within GitLab’s DevSecOps Platform that helps to speed up development of code, improve operations, and secure software. Since its debut in June, we’ve been steadily expanding the suite of AI capabilities. These now extend across the entire software development lifecycle – from suggesting code, to finding and explaining vulnerabilities in code, to identifying appropriate code reviewers. 
As enterprises increase code generation, they can avoid potential bottlenecks, such as security checks, further downstream.\n\nFor example, we recently released our [GitLab Duo Vulnerability Explanation feature into Beta](https://about.gitlab.com/blog/remediating-vulnerabilities-with-insights-and-ai/). Typically, vulnerability discovery and mitigation would require a significant amount of back-and-forth between development and application security teams to agree on severity levels and approaches to fix the vulnerability. Vulnerability Explanation alleviates this inefficiency by summarizing detected vulnerabilities and their implications as well as providing in-depth solutions and suggested mitigation within the developer’s workflow, enabling faster resolution and creation of safer code within the development workflow. \n\n![GitLab Duo Vulnerability Explanation](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/ai_explain_this_vulnerability_results.png)\n\n\nFor even more efficiency, [GitLab Duo Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) (Beta) helps developers create new code and update existing code faster. [GitLab Duo Suggested Reviewers](https://about.gitlab.com/blog/gitlab-suggested-reviewers/) (generally available to all users) helps teams make an informed decision when choosing reviewers that can meet their review criteria.\n\nLearn about [all GitLab Duo capabilities](https://about.gitlab.com/gitlab-duo/).\n\nWatch GitLab Duo capabilities in action.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/LifJdU3Qagw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n## A single, enterprise-ready DevSecOps platform \nEnterprise needs from a software delivery platform are unique. 
A DevSecOps platform must support the ability to:\n- build for speed with adequate security guardrails right from the start\n- consolidate to a single platform, but still integrate with your existing solution\n- simply adopt and onboard developers, but handle the complexity of scale\n\nGitLab CI/CD is a core way for organizations to meet these requirements. As customers scale their adoption of GitLab, they run millions of CI/CD jobs on a monthly basis. With the efficiency improvements further driven by GitLab Duo, these numbers will likely increase. However, organizations will need to find efficiency opportunities throughout their development and deployment workflows to be able to handle this growth, ensuring that whatever is deploying into production meets their quality, security, and reliability standards.\n\nThe [GitLab CI/CD Component Catalog](https://about.gitlab.com/blog/introducing-ci-components/), which will soon be released into Beta, solves these problems by enabling organizations to standardize their pipelines and create building blocks in a centralized repository that can be easily discovered, reused, and shared across teams. Enterprises can develop base pipeline configurations with the proper compliance, quality, and security checks already built-in for use across their organization. 
\n\nHere are some more capabilities aimed at improving the enterprise platform experience:\n- The GitLab Runner ecosystem continues to expand as we've recently introduced [GitLab SaaS runners on MacOS](https://about.gitlab.com/releases/2023/09/22/gitlab-16-4-released/#macos-13-ventura-image-for-saas-runners-on-macos), [xlarge and 2xlarge SaaS Runners on Linux](https://about.gitlab.com/releases/2023/08/22/gitlab-16-3-released/#more-powerful-gitlab-saas-runners-on-linux), [increased storage on medium and large SaaS Runners on Linux](https://about.gitlab.com/releases/2023/06/22/gitlab-16-1-released/#increased-storage-for-gitlab-saas-runners-on-linux), and [GPU-enabled SaaS Runners on Linux](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/#gpu-enabled-saas-runners-on-linux) for supporting data science workloads.\n- GitLab Duo, which was previously only available for GitLab SaaS, is now extended to GitLab self-hosted. Enterprises that prefer to self-host or must self-host due to compliance and regulatory restrictions can now take advantage of our AI features, starting with [Code Suggestions](https://about.gitlab.com/blog/self-managed-support-for-code-suggestions/).\n- Organizations looking at using GitLab Packages as their consolidated package registry can now [import packages](https://docs.gitlab.com/ee/user/packages/package_registry/supported_functionality.html#importing-packages-from-other-repositories) from their current package registries like Maven Central or Artifactory. GitLab [supports importing](https://docs.gitlab.com/ee/user/packages/package_registry/supported_functionality.html#importing-packages-from-other-repositories) Maven, npm, NuGet, and PyPI package types into GitLab, with many more package formats to follow. 
\n\n## A customizable solution that fits the way you work\nAs companies grow, there is an increasing need to personalize development and deployment settings and provide distinct visibility into the DevSecOps lifecycle to users beyond the immediate DevSecOps teams. GitLab is designed to function effectively with minimal adjustments, yet it offers the flexibility to be tailored to the requirements of expanding organizations. \n\nOur recent developments, including [changes to product navigation](https://about.gitlab.com/blog/navigation-research-blog-post/), are driven by comprehensive user research. We recognize that each organization and its individual users have unique, preferred workflows. Our updated navigation features, such as pinning frequently accessed items, visualizing work, and simplifying navigation through fewer top-level items, empower DevSecOps teams to align the platform with their optimal environment and workflow.\n\nWatch the new and simplified navigation in action.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/rGTl9_HIpbY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\nHere are some other highlights:\n- In addition to overhauling the navigation, we [introduced the rich text editor](https://about.gitlab.com/releases/2023/07/22/gitlab-16-2-released/#all-new-rich-text-editor-experience) by providing a “what you see is what you get” editing experience. The rich text editor is now available in all issues, epics, and merge requests.\n- GitLab offers [six out-of-the-box roles](https://docs.gitlab.com/ee/user/permissions.html#roles), but for many enterprises this was not enough. Some roles gave too much permission, while others didn’t grant enough permissions to complete a task. 
Enterprises needed a way to define their own roles – leading to [customizable roles](https://docs.gitlab.com/ee/user/custom_roles.html), which gives GitLab administrators the ability to define roles with granular permissions suited for their needs.\n- GitLab Value Streams Dashboard ensures that all stakeholders have visibility into the progress and value delivery metrics associated with software development and delivery. To align with customers’ needs to customize the data viewed and the appearance, we introduced [new velocity metrics](https://about.gitlab.com/releases/2023/08/22/gitlab-16-3-released/#new-velocity-metrics-in-the-value-streams-dashboard) and the ability to [customize the appearance and data](https://about.gitlab.com/releases/2023/07/22/gitlab-16-2-released/#new-customization-layer-for-the-value-streams-dashboard) to adjust metrics based on their areas of interest, filter out irrelevant information, and focus on the data that is most relevant to their analysis or decision-making process.\n\n![New velocity metrics in the Value Streams Dashboard](https://about.gitlab.com/images/16_3/16.3_vsd.mr_iss.png)\n\n\n## The enterprise awaits — get growing today\t\nOrganizations on a growth trajectory need a way to sustain that growth. They'll need to leverage the capabilities of AI to generate code faster — but they can't sacrifice quality or security. Organizations will also need to set standards for development and deployment that extend across the enterprise, and every user will need a clear and customizable view of the DevSecOps lifecycle. As we bring new capabilities into the GitLab DevSecOps Platform, we will continue to support these enterprise-class needs.\n\n> Bring the best practices of industry leaders to your team. Join GitLab and Nasdaq for an exciting discussion about AI, DevSecOps, and developer productivity. 
[Register for this webinar today!](https://page.gitlab.com/webcast-fy24q3-devsecops-ai-developer-productivity.html)\n\n\n**Disclaimer:** This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\n",[738,109,9,717],{"slug":1634,"featured":6,"template":693},"gitlab-ai-cicd-customization-toolkit","content:en-us:blog:gitlab-ai-cicd-customization-toolkit.yml","Gitlab Ai Cicd Customization Toolkit","en-us/blog/gitlab-ai-cicd-customization-toolkit.yml","en-us/blog/gitlab-ai-cicd-customization-toolkit",{"_path":1640,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1641,"content":1646,"config":1652,"_id":1654,"_type":14,"title":1655,"_source":16,"_file":1656,"_stem":1657,"_extension":19},"/en-us/blog/gitlab-chat-ai",{"title":1642,"description":1643,"ogTitle":1642,"ogDescription":1643,"noIndex":6,"ogImage":868,"ogUrl":1644,"ogSiteName":706,"ogType":707,"canonicalUrls":1644,"schema":1645},"ML experiment: Use a chatbot to answer how-to questions","Learn how GitLab is experimenting with a docs chatbot that you can ask product questions in this latest installment of our ongoing 'AI/ML in DevSecOps' series.","https://about.gitlab.com/blog/gitlab-chat-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Use a chatbot to answer how-to questions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Waldner\"}],\n        \"datePublished\": \"2023-05-04\",\n      
}",{"title":1642,"description":1643,"authors":1647,"heroImage":868,"date":1649,"body":1650,"category":10,"tags":1651},[1648],"Sarah Waldner","2023-05-04","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nAt GitLab, [everyone can contribute](/company/mission/). As a platform, GitLab offers a wide variety of features, but it is hard to know everything GitLab is capable of without diligently combing through our documentation, testing out our features, or talking to someone at GitLab who can answer your questions. Reading documentation can send users down a rabbit hole and they may not get their question answered. This can lead to time spent with support, meetings with the Customer Success team, going back and forth with a solutions architect, coordinating with technical account managers, and maybe opting for some professional services hours. What if users had access to all the knowledge they needed at their fingertips 24/7 and could get their complex questions answered immediately?\n        \nIn an experimental feature, the [Global Search team](/handbook/engineering/development/enablement/data_stores/search/) used AI to create a chatbot that answers how-to questions about the GitLab product. It will respond with an explanation and relevant links to our documentation.   \n\n![GitLab Chat answering a simple question](https://about.gitlab.com/images/blogimages/gitlab_chat.gif){: .shadow}\n\nGetting answers on how-to questions while in the product eliminates time lost to context switching. The chat interface overlays the GitLab UI, which enables you to interface with a virtual expert alongside your work. 
This is especially helpful when you are involved in a complex multi-step task like setting up a gitlab-ci.yml file, configuring security policies, or editing a CODEOWNERS file.\n\nGitLab chat will answer any question that you would utilize our product documentation to answer. It also provides helpful links if the bot's response is not as detailed as you needed - the full documentation is just one click away.\n\n## Be part of our AI-assisted features journey\n\nThis experiment is just the start of the ways we're looking to infuse GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI Assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[716,738,717,9],{"slug":1653,"featured":6,"template":693},"gitlab-chat-ai","content:en-us:blog:gitlab-chat-ai.yml","Gitlab Chat Ai","en-us/blog/gitlab-chat-ai.yml","en-us/blog/gitlab-chat-ai",{"_path":1659,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1660,"content":1664,"config":1670,"_id":1672,"_type":14,"title":1673,"_source":16,"_file":1674,"_stem":1675,"_extension":19},"/en-us/blog/gitlab-duo-agent-platform-public-beta",{"noIndex":6,"title":1661,"description":1662,"ogImage":1663},"GitLab Duo Agent Platform goes public beta","Introducing the DevSecOps orchestration platform designed to unlock asynchronous collaboration between developers and AI agents.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1752678395/impw8no5tbskr6k2afgu.jpg",{"tags":1665,"category":10,"date":1666,"heroImage":1663,"authors":1667,"description":1662,"title":1668,"body":1669},[9,738,717,1181],"2025-07-17",[1568],"GitLab Duo Agent Platform Public Beta: Next-gen AI orchestration and more","**We're building the future of software development.**\n\nAt GitLab, we are [reimagining the future of software engineering](https://about.gitlab.com/blog/gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops/) as a human and AI collaboration. Where developers focus on solving technical, complex problems and driving innovation, while AI agents handle the routine, repetitive tasks that slow down progress. Where developers are free to explore new ideas in code at much lower cost, bug backlogs are a thing of the past, and users of the software you build enjoy a more usable, reliable, and secure experience. This isn't a distant dream. 
We're building this reality today, and it is called the GitLab Duo Agent Platform.\n\n## What is GitLab Duo Agent Platform?\n\nGitLab Duo Agent Platform is our next-generation DevSecOps orchestration platform designed to unlock asynchronous collaboration between developers and AI agents. It will transform your development workflow from isolated linear processes into dynamic collaboration where specialized AI agents work alongside you and your team on every stage of the software development lifecycle; it will be like having an unlimited team of colleagues at your disposal.\n\nImagine delegating a complex refactoring task to a Software Developer Agent while simultaneously having a Security Analyst Agent scan for vulnerabilities and a Deep Research Agent analyze progress across your repository history. This all happens in parallel, orchestrated seamlessly within GitLab.\n\nToday, we are announcing the launch of the [first public beta of the GitLab Duo Agent Platform](https://about.gitlab.com/gitlab-duo/agent-platform/) for GitLab.com and self-managed GitLab Premium and Ultimate customers. This is just the first in a series of updates that will improve how software gets planned, built, verified, and deployed as we amplify human ingenuity through intelligent automation.\n\nThis first beta focuses on unlocking the IDE experience through the GitLab VS Code extension and JetBrains IDEs plug-in; next month, we plan on bringing the Duo Agent Platform experience to the GitLab application and expand our IDE support. Let me share a bit more about our vision for the roadmap between now and general availability, planned for later this year. You can find details about the first beta down below.\n\nWatch this video or read on for what's available now and what's to come. 
Then, if you're ready to get started with Duo Agent Platform, [find out how with the public beta](#get-started-now).\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101993507?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab Agent Platform Beta Launch_071625_MP_v2\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## GitLab's unique position as an orchestration platform\n\nGitLab sits at the heart of the development lifecycle as the system of record for engineering teams, orchestrating the entire journey from concept to production for over 50 million registered users, including half of the Fortune 500 across geographies. This includes over 10,000 paying customers across all segments and verticals, including public institutions.\n\nThis gives GitLab something no competitor can match: a comprehensive understanding of everything it takes to deliver software. We bring together your project plans, code, test runs, security scans, compliance checks, and CI/CD configurations to not only power your team but also orchestrate collaboration with AI agents you control.\n\nAs an intelligent, unified DevSecOps platform, GitLab stores all of the context about your software engineering practice in one place. We will expose this unified data to AI agents via our knowledge graph. 
Every agent we build has automatic access to this SDLC-connected data set, providing rich context so agents can make informed recommendations and take actions that adhere to your organizational standards.\n\n**Here's an example of this advantage in action.** Have you ever tried to figure out exactly how a project is going across dozens, if not hundreds, of stories and issues being worked on across all the developers involved? Our Deep Research Agent leverages the GitLab Knowledge Graph and semantic search capabilities to traverse your epic and all related issues, and explore the related codebase and surrounding context. It quickly correlates information across your repositories, merge requests, and deployment history. This delivers critical insights that standalone tools can't match and that would take human developers hours to uncover. \n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101998114?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Deep Research Demo_071625_MP_v1\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Our strategic evolution from AI features to agent orchestration\n\nGitLab Duo started as an add-on, bringing generative AI to developers through Duo Pro and Enterprise. With GitLab 18.0, it's now built into the platform. 
We've unlocked [Duo Agentic Chat](https://about.gitlab.com/blog/gitlab-duo-chat-gets-agentic-ai-makeover/) and Code Suggestions for all Premium and Ultimate users, and now we're providing immediate access to the Duo Agent Platform.\n\nWe've ramped up engineering investment and are accelerating delivery, with powerful new AI features landing every month. But we're not just building another coding assistant. GitLab Duo is becoming an agent orchestration platform, where you can create, customize, and deploy AI agents that work alongside you and interoperate easily with other systems, dramatically increasing productivity. \n\n> **“GitLab Duo Agent Platform enhances our development workflow with AI that truly understands our codebase and our organization. Having GitLab Duo AI agents embedded in our system of record for code, tests, CI/CD, and the entire software development lifecycle boosts productivity, velocity, and efficiency. The agents have become true collaborators to our teams, and their ability to understand intent, break down problems, and take action frees our developers to tackle the exciting, innovative work they love.”** - Bal Kang, Engineering Platform Lead at NatWest\n\n### Agents that work out of the box\n\nWe are introducing agents that mirror familiar team roles. These agents can search, read, create, and modify existing artifacts across GitLab. Think of these as agents you can interact with individually, that also act as building blocks that you can customize to create your own agents. Like your team members, agents have defined specializations, such as software development, testing, or technical writing. As specialists, they're tapping into the right context and tools to consistently accomplish the same types of tasks, wherever they're deployed.\n\nHere are some of the agents we're building today:\n\n- **Chat Agent (now in beta):** Takes natural language requests to provide information and context to the user. 
Can perform general development tasks, such as reading issues or code diffs. As an example, you can ask Chat to debug a failed job by providing the job URL.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1102616311?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"agentic-chat-in-web-ui-demo_Update V2\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\u003Cp>\u003C/p>\n\n- **Software Developer Agent (now in beta):** Works on assigned items by creating code changes in virtual development environments and opening merge requests for review.\n\n- **Product Planning Agent:** Prioritizes product backlogs, assigns work items to human and agentic team members, and provides project updates over specified timelines.\n\n- **Software Test Engineer Agent:** Tests new code contributions for bugs and validates if reported issues have been resolved. 
\n\n- **Code Reviewer Agent:** Performs code reviews following team standards, identifies quality and security issues, and can merge code when ready.\n\n- **Platform Engineer Agent:** Monitors GitLab deployments, including GitLab Runners, tracks CI/CD pipeline health, and reports performance issues to human platform engineering teams.\n\n- **Security Analyst Agent:** Finds vulnerabilities within codebases and deployed applications, and implements code and configuration changes to help resolve security weaknesses.\n\n- **Deployment Engineer Agent:** Deploys updates to production, monitors for unusual behavior, and rolls back changes that impact application performance or security.\n\n- **Deep Research Agent:** Conducts comprehensive, multi-source analysis across your entire development ecosystem.\n\nWhat makes these agents powerful is their native access to GitLab's comprehensive toolkit. Today, we have over 25 tools, from issues and epics to merge requests and documentation, with more to come. Unlike external AI tools that operate with limited context, our agents work as true team members with full platform privileges under your supervision.\n\nIn the coming months, you'll also be able to modify these agents to meet the needs of your organization. For example, you'll be able to specify that a Software Test Engineer Agent follows best practices for a particular framework or methodology, deepening its specialization and turning it into an even more valuable team member.\n\n## Flows orchestrate complex agent tasks\n\nOn top of individual agents, we are introducing agent Flows. Think of these as more complex workflows that can include multiple agents with pre-built instructions, steps, and actions for a given task that can run autonomously. \n\nWhile you can create Flows for basic tasks common to individuals, they truly excel when applied to complex, specialized tasks that would normally take hours of coordination and effort to complete. 
Flows will help you finish complex tasks faster and, in many cases, asynchronously without human intervention.\n\nFlows have specific triggers for execution. Each Flow contains a series of steps, and each step has detailed instructions that tell a specialized agent what to do. This granular approach allows  you to give precise instructions to agents in the Flow. By defining instructions in greater detail and establishing structured decision points, Flows can help solve for the inherent variability in AI responses while eliminating the need to repeatedly specify the same requirements, unlocking more consistent and predictable outcomes without user configuration.\n\nHere are some examples of out-of-the-box Flows that we are building:\n\n- **Software Development Flow (now in beta):** Orchestrates multiple agents to plan, implement, and test code changes end-to-end, helping transform how teams deliver features from concept to production.\n\n- **Issue-to-MR Flow:** Automatically converts issues into actionable merge requests by coordinating agents to analyze requirements, prepare comprehensive implementation plans, and generate code.\n\n- **Convert CI File Flow:** Streamlines migration workflows by having agents analyze existing CI/CD configurations and intelligently convert them to GitLab CI format with full pipeline compatibility.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101941425?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"jenkins-to-gitlab-cicd-for-blog\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\n- **Search and Replace Flow:** 
Discovers and transforms code patterns across codebases by systematically analyzing project structures, identifying optimization opportunities, and executing precise replacements.\n\n- **Incident Response & Root Cause Analysis Flow:** Orchestrates incident response by correlating system data, coordinating specialized agents for root cause analysis, and executing approved remediation steps while keeping human stakeholders informed throughout the resolution process.\n\nThis is where GitLab Duo Agent Platform is taking a truly unique approach versus other AI solutions. We won't just give you pre-built agents. We'll also give you the power to create, customize, and share agent Flows that perfectly match your individual and organization's unique needs. And with Flows, you will then be able to give agents a specific execution plan for common and complex tasks.\n\nWe believe this approach is more powerful than building purpose-built agents like our competitors do, because every organization has different workflows, coding standards, security requirements, and business logic. 
Generic AI tools can't understand your specific context, but GitLab Duo Agent Platform will be able to be tailored to work exactly how your team works.\n\n## Why build agents and agent Flows in the GitLab Duo Agent Platform?\n\n**Build fast.** You can build agents and complex agent Flows in the Duo Agent Platform quickly and easily using a fast, declarative extensibility model and UI assistance.\n\n**Built-in compute.** With Duo Agent Platform, you no longer have to worry about the hassle of standing up your own infrastructure for agents: compute, network, and storage are all built-in.\n\n**SDLC events.** Your agents can be invoked automatically on common events: broken pipeline, failed deployment, issue created, etc.\n\n**Instant access.** You can interact with your agents everywhere in GitLab or our IDE plug-in: assign them issues, @mention them in comments, and chat with them everywhere Duo Chat is available.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1102029239?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"assigning an agent an issue\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script> \u003Cp>\u003C/p>\n\n**Built-in and custom models supported.** Your agents will have automatic access to all of the models we support, and users will be able to choose specific models for specific tasks. 
If you want to connect Duo Agent Platform to your own self-hosted model, you will be able to do that too!\n\n**Model Context Protocol (MCP) endpoints.** Every agent and Flow can be accessed or triggered via native MCP endpoints, allowing you to connect to and collaborate with your agents and Flows from anywhere, including popular tools like Claude Code, Cursor, Copilot, and Windsurf.\n\n**Observability and security.** Finally, we provide built-in observability and usage dashboards, so you can see exactly who, where, what, and when agents took actions on your behalf.\n\n## A community-driven future\n\nCommunity contributions have long fueled GitLab's innovation and software development. We're excited to partner with our community with the introduction of the AI Catalog. The AI Catalog will allow you to create and share agents and Flows within your organization and across the GitLab Ecosystem in our upcoming beta.\n\nWe believe that the most valuable AI applications are likely to emerge from you, our community, thanks to your daily application of GitLab Duo Agent Platform to solve numerous real-world use cases. By enabling seamless sharing of agents and Flows, we're creating a network effect where each contribution enhances the platform's collective intelligence and value. Over time, we believe that the most valuable use cases from Agent Platform will come from our thriving GitLab community. \n\n![AI Catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752685501/awdwx08udwrxgvcpmssb.png \"AI Catalog\")\n\n## Available today in the GitLab Duo Agent Platform in public beta\n\nThe GitLab Duo Agent Platform public beta is available now to Premium and Ultimate customers with these capabilities:\n\n**Software Development Flow:** Our first Flow orchestrates agents in gathering comprehensive context, clarifying ambiguities with human developers, and executing strategic plans to make precise changes to your codebase and repository. 
It leverages your entire project, including its structure, codebase, and history, along with additional context like GitLab issues or merge requests to amplify developer productivity.\n\n**New Agent tools available:** Agents now have access to multiple tools to do their work, including:\n\n  - File System (Read, Create, Edit, Find Files, List, Grep)\n  - Execute Command Line*\n  - Issues (List, Get, Get Comments, Edit*, Create*, Add/Update Comments*)\n  - Epics (Get, Get Comments)\n  - MR (Get, Get Comments, Get Diff, Create, Update)\n  - Pipeline (Job Logs, Pipeline Errors)\n  - Project (Get, Get File)\n  - Commits (Get, List, Get Comments, Get Diff)\n  - Search (Issue Search)\n  - Secure (List Vulnerabilities)\n  - Documentation Search\n  \n*=Requires user approval\n\n**GitLab Duo Agentic Chat in the IDE:** Duo Agentic Chat transforms the chat experience from a passive Q&A tool into an active development partner directly in your IDE.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1103237126?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"agentic-ai-launch-video_NEW\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\u003Cp>\u003C/p>\n\n- **Iterative feedback and chat history:** Duo Agentic Chat now supports chat history and iterative feedback, transforming the agent into a stateful, conversational partner. 
This fosters trust, enabling developers to delegate more complex tasks and offer corrective guidance.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101743173?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"agentic-chat-history\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\n- **Streamlined delegation with slash commands:** Expanded, more powerful slash commands, such as /explain, /tests, and /include, create a “delegation language” for quick and precise intent. The /include command allows the explicit injection of context from specific files, open issues, merge requests, or dependencies directly into the agent's working memory, making the agent more powerful and teaching users how to provide optimal context for high-quality responses.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101743187?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"include-agentic-chat-jc-voiceover\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\n- **Personalization through custom rules:** New Custom Rules enables developers to tailor agent behavior to individual and team preferences using natural language, for example, development style guides. 
This foundational mechanism shapes the agent's persona into a personalized assistant, evolving toward specialized agents based on user-defined preferences and organizational policies.\n    \n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101743179?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"custom-rules-with-jc-voiceover\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\n- **Support for GitLab Duo Agentic Chat in JetBrains IDE:** To help meet developers where they work, we have expanded Duo Agentic Chat support to the JetBrains family of IDEs, including IntelliJ, PyCharm, GoLand, and Webstorm. This adds to our existing support for VS Code. Existing users get agentic capabilities automatically, while new users can install the plugin from the JetBrains Marketplace.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101743193?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"jetbrains-support-jc-voiceover\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n    \n- **MCP client support:** Duo Agentic Chat can now act as an MCP client, connecting to remote and locally running MCP servers. This capability unlocks the agent's ability to connect to systems beyond GitLab like Jira, ServiceNow, and ZenDesk to gather context or take actions. 
Any service that exposes itself via MCP can now become part of the agent's skill set. The official GitLab MCP Server is coming soon!\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1101743202?title=0&amp;byline=0&amp;portrait=0&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"McpDemo\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n    \n- **GitLab Duo Agentic Chat in GitLab Web UI.** Duo Agentic Chat is also now available directly within the GitLab Web UI. This pivotal step evolves the agent from a coding assistant to a true DevSecOps agent, as it gains access to rich non-code context, such as issues and merge request discussions, allowing it to understand the \"why\" behind the work. Beyond understanding context, the agent can make changes directly from the WebUI, such as automatically updating issue statuses or editing merge request descriptions.\n\n## Coming soon to GitLab Duo Agent Platform\n\nOver the coming weeks, we'll release new capabilities to Duo Agent Platform, including more out-of-the-box agents and Flows. These will bring the platform into the GitLab experience you love today and enable even greater customization and extensibility, amplifying productivity for our customers:\n\n![GitLab Duo Agent Platform public beta roadmap](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752685275/hjbe9iiu2ydp9slibsc2.png \"GitLab Duo Agent Platform public beta roadmap\")\n\n\n- **Integrated GitLab experience:** Building on the IDE extensions available in 18.2, we're expanding agents and Flows within the GitLab platform. 
This deeper integration will expand the ways you can collaborate synchronously and asynchronously with agents. You will be able to assign issues directly to agents, @mention them within GitLab Duo Chat, and seamlessly invoke them from anywhere in the application while maintaining MCP connectivity from your developer tool of choice. This native integration transforms agents into true development team members, accessible across GitLab.\n\n- **Agent observability:** As agents become more autonomous, we're building comprehensive visibility into their activity as they progress through Flows, enabling you to monitor their decision-making processes, track execution steps, and understand how they're interpreting and acting on your development challenges. This transparency into agent behavior builds trust and confidence while allowing you to optimize workflows and identify bottlenecks, and helps ensure agents are performing exactly as intended.\n\n- **AI Catalog:** Recognizing that great solutions come from community innovation, we will soon introduce the public beta of our AI Catalog — a marketplace which will allow you to extend Duo Agent Platform with specialized Agents and Flows sourced from GitLab, and over time, the broader community.  You'll be able to quickly deploy these solutions in GitLab, leveraging context across your projects and codebase.\n\n- **Knowledge Graph:** Leveraging GitLab's unique advantage as the system of record for source code and its surrounding context, we're building a comprehensive Knowledge Graph that not only maps files and dependencies across the codebase but also makes that map navigable for users while accelerating AI query times and helping increase accuracy. 
This foundation enables GitLab Duo agents to quickly understand relationships across your entire development environment, from code dependencies to deployment patterns, unlocking faster and more precise responses to complex questions.\n\n![GitLab Duo Agent Platform Knowledge Graph](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752685367/n0tvfgorchuhrronic3j.png \"GitLab Duo Agent Platform Knowledge Graph\")\n\n- **Create and edit agents and Flows:** Understanding that every organization has unique workflows and requirements, we're developing powerful agent and Flow creation and editing capabilities that will be introduced as the AI Catalog matures. You'll be able to create and modify agents and Flows to operate precisely the way your organization works, delivering deep customization across the Duo Agent Platform that enables higher quality results and increased productivity. \n\n![AI Catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752684938/fruwqcqvvrx8gmkz5u0v.png \"AI Catalog\")\n\n- **Official GitLab MCP Server:** Recognizing that developers work across multiple tools and environments, we're building an official GitLab MCP server that will enable you to access all of your agents and Flows via MCP. You'll be able to connect to and collaborate with your agents and Flows from anywhere MCP is supported, including popular tools like Claude Code, Cursor, Copilot, and Windsurf, unlocking seamless AI collaboration regardless of your preferred development environment.\n\n- **GitLab Duo Agent Platform CLI:** Our upcoming CLI will allow you to invoke agents and trigger Flows on the command line, leveraging GitLab's rich context across the entire software development lifecycle—from code repositories and merge requests to CI/CD pipelines and issue tracking. 
\n\n## Get started now\n\n- **GitLab Premium and Ultimate customers** in GitLab.com and self-managed environments using GitLab 18.2 can use Duo Agent Platform immediately (beta and experimental features for GitLab Duo [must be enabled](https://docs.gitlab.com/user/gitlab_duo/turn_on_off/#turn-on-beta-and-experimental-features)). GitLab Dedicated customers will be able to use the Duo Agent Platform with the release of GitLab 18.2 for Dedicated next month.\n\n- Users should download the [VS Code extension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow) or the [JetBrains IDEs plugin](https://plugins.jetbrains.com/plugin/22857-gitlab) and follow our [guide to using GitLab Duo Agentic Chat](https://docs.gitlab.com/user/gitlab_duo_chat/agentic_chat/#use-agentic-chat), including Duo Chat [slash commands](https://docs.gitlab.com/user/gitlab_duo_chat/examples/#gitlab-duo-chat-slash-commands). \n\n**New to GitLab?** See GitLab Duo Agent Platform in action at our Technical Demo, offered in two timezone-friendly sessions: [Americas and EMEA](https://page.gitlab.com/webcasts-jul16-gitlab-duo-agentic-ai-emea-amer.html) and [Asia-Pacific](https://page.gitlab.com/webcasts-jul24-gitlab-duo-agentic-ai-apac.html). To get hands-on with GitLab Duo Agent Platform yourself, sign up for a [free trial](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com%2Fsales%2F) today.\n\n\n\u003Csmall>*This blog post contains “forward-looking statements” within the meaning of Section 27A of the Securities Act of 1933, as amended, and Section 21E of the Securities Exchange Act of 1934. 
Although we believe that the expectations reflected in the forward-looking statements contained in this blog post are reasonable, they are subject to known and unknown risks, uncertainties, assumptions and other factors that may cause actual results or outcomes to be materially different from any future results or outcomes expressed or implied by the forward-looking statements.*\n\n*Further information on risks, uncertainties, and other factors that could cause actual outcomes and results to differ materially from those included in or contemplated by the forward-looking statements contained in this blog post are included under the caption “Risk Factors” and elsewhere in the filings and reports we make with the Securities and Exchange Commission. We do not undertake any obligation to update or release any revisions to any forward-looking statement or to report any events or circumstances after the date of this blog post or to reflect the occurrence of unanticipated events, except as required by law.*\u003C/small>\n",{"featured":91,"template":693,"slug":1671},"gitlab-duo-agent-platform-public-beta","content:en-us:blog:gitlab-duo-agent-platform-public-beta.yml","Gitlab Duo Agent Platform Public Beta","en-us/blog/gitlab-duo-agent-platform-public-beta.yml","en-us/blog/gitlab-duo-agent-platform-public-beta",{"_path":1677,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1678,"content":1681,"config":1687,"_id":1689,"_type":14,"title":1690,"_source":16,"_file":1691,"_stem":1692,"_extension":19},"/en-us/blog/gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops",{"noIndex":6,"title":1679,"description":1680},"GitLab Duo Agent Platform: What’s next for intelligent DevSecOps","GitLab Duo Agent Platform, a DevSecOps orchestration platform for humans and AI agents, leverages agentic AI for collaboration across the software development 
lifecycle.",{"heroImage":1682,"title":1679,"description":1680,"authors":1683,"date":1684,"body":1685,"category":10,"tags":1686},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1750687578/esmflevxk5bf3eezjhwk.png",[1568],"2025-06-24","I’m thrilled to introduce the next evolution of Duo Workflow: GitLab Duo Agent Platform. This innovative platform extends agentic capabilities across the software development lifecycle, enabling teams to work in parallel with multiple AI agents.\n\nImagine starting your day like this:\n\n* You assign one AI agent to conduct deep research on an epic your team is working on, provide the latest updates on all contributions from the past week, and suggest a release post based on recent feature additions.\n* In parallel, you delegate a handful of accessibility bugs to several agents for analysis and to make the necessary code changes to resolve them.\n* Meanwhile, you ask another agent to review your complicated code changes and provide feedback before sending them to your teammate for formal review.\n* Finally, when the security team pings you about a new vulnerability that needs investigation across your entire project, you hand that research task to your security agent.\n\nAll of this happens simultaneously, while you focus on architecture decisions, creative problem-solving, and strategic technical work. GitLab Duo Agent Platform will let you delegate tasks to five, 10, or even 100 specialized agents — all with full context of your project, not just your code, including CI job logs, planning work items, and so much more. You’re automating the tedious work you have to do, so you can focus on the work that inspires you.\n\n**This isn't about replacing developers. 
It's about amplifying human creativity and expertise by removing the friction from routine tasks.** That’s the future we’re building with GitLab Duo Agent Platform.\n\n## What is GitLab Duo Agent Platform?\n\nGitLab Duo Agent Platform will enable many-to-many collaboration between engineers and [AI agents](https://about.gitlab.com/topics/agentic-ai/) across the full software development lifecycle, designed to help teams dramatically improve productivity and cycle time.\n\nBuilt on GitLab’s secure foundation, GitLab Duo Agent Platform is customizable and extendable. It empowers developers to build agents to tackle all kinds of software engineering problems, leveraging context across your entire software development lifecycle.\n\nGitLab Duo Agent Platform will go beyond code creation with specialized agents and custom workflows that can help with a nearly unlimited list of activities, including:\n\n* Issue implementation\n* Large-scale migrations/dependency upgrades\n* Automated documentation building/release posts\n* Fixing broken pipelines\n* Incident research support\n* Deep research of status and information on topics\n* Backlog administration\n* Vulnerability resolution\n* Reviews for specific types of code (e.g. database)\n* Quick internal tool building based on existing build blocks\n* and many more!\n\nYou will be able to use our agents out of the box as well as customize and extend them. 
We’re currently beta testing GitLab Duo Agent Platform with dozens of customers and will open beta access to more teams soon.\n\nWatch GitLab Duo Agent Platform in action:\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1095679084?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Agent Platform Demo Clip\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Choose your tools, your models, and your agents\n\nConsistent with GitLab’s commitment to being an open platform, GitLab agents will seamlessly interoperate with your choice of code-authoring developer tools via standard model context protocol (MCP) and the agent-to-agent (A2A) framework, whether you’re using Cursor, Claude Code, Windsurf, OpenAI Codex, or others.\n\nThe platform will accept code contributions from any development tool in your stack, whether that code was written by a human developer or generated by an AI agent. This means your existing workflows and preferred tools will continue to work seamlessly as you integrate agent capabilities.\n\nGitLab Duo Agent Platform will work with any approved language model that [meets our selection criteria](https://about.gitlab.com/ai-transparency-center/#ai-continuity-plan). For organizations with strict security requirements, it will support approved self-hosted models running in completely air-gapped environments. Your infrastructure requirements and security policies won’t limit your ability to benefit from agentic development.\n\n## Context is everything, and your GitLab Duo agents have it\n\nThe difference between a helpful AI tool and a truly intelligent agent comes down to context. 
With GitLab Duo Agent Platform, agents don't work in isolation — they're deeply integrated into the platform where development work happens.\n\nEvery agent will automatically understand the full picture of your projects, including your open issues and their history, the merge requests that resolved them, the structure and rationale behind your code, your CI/CD pipeline configurations, security findings, compliance requirements, and the intricate relationships between all these components.\n\nJust like your human team members, agents have all the context to help you ship secure software faster. Instead of just answering questions about code, they will be able to provide insights about how a proposed change might affect your deployment pipeline or suggest security improvements based on your existing compliance rules. We believe that the more your team works within GitLab’s DevSecOps platform, the smarter your agents will become.\n\n## Stay in control while agents scale your team\n\nBuilding trust with AI agents isn't fundamentally different from building trust with new team members. You need to see their work, understand their approach, and gradually increase their responsibilities as they prove their competence.\n\nThat's the philosophy behind our agent approval workflow. Before any agent makes changes to your code or environment, it will present you with a clear plan: what it understands about the issue, the approach it will take, and the specific actions it wants to perform. You’ll then get the opportunity to review, approve, or redirect as needed. Over time, as agents consistently deliver quality work, you will be able to grant them greater autonomy for routine tasks while maintaining oversight for complex or critical work.\n\n## Built for community and customization\n\nGitLab has always thrived on community contributions, and this year marked a milestone with record-breaking customer contributions to our platform. 
Now we're extending that same collaborative energy to AI agents through our open framework approach.\n\nGitLab Duo Agent Platform isn't just about the agents we build — it's about empowering you and the broader community to create specialized agents that solve your unique engineering challenges. Whether you need an agent that understands your specific coding standards, integrates with your custom toolchain, or handles domain-specific tasks, the platform will give you the building blocks to make it happen.\n\nThis community-driven model creates a virtuous cycle that leverages the strength of the GitLab community through global sharing, similar to our [CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/). Diverse real-world use cases drive innovation. Enterprise feedback ensures reliability and security. And shared solutions benefit everyone. It's the same collaborative approach that has made GitLab successful, now applied to the frontier of agentic development.\n\n## How to get started\n\nIf you've been experimenting with [GitLab Duo Agentic Chat](https://about.gitlab.com/blog/gitlab-duo-chat-gets-agentic-ai-makeover/), now included with every GitLab 18 Premium and Ultimate GitLab.com user license, you've already gotten a taste of what's possible with AI agents in your development workflow.\n\nTo see what GitLab Duo Agent Platform can do and what we’re working on, check out the [demos in the recording of our annual GitLab 18 release event](https://about.gitlab.com/eighteen/).\n\nWant to be among the first to experience it? Sign up for the GitLab [Duo Agent Platform beta waitlist](https://about.gitlab.com/gitlab-duo/agent-platform/). This summer, we'll be opening access to more teams, with new agent features coming out in GitLab 18's upcoming releases throughout the year. 
We expect general availability this winter.\n\n*Disclaimer: This presentation contains information related to upcoming products, features, and functionality. It is important to note that the information in this presentation is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this presentation and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc.*\n\n## Learn more\n\n- [From vibe coding to agentic AI: A roadmap for technical leaders](https://about.gitlab.com/the-source/ai/from-vibe-coding-to-agentic-ai-a-roadmap-for-technical-leaders/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [DevOps automation and AI agents](https://about.gitlab.com/topics/agentic-ai/devops-automation-ai-agents/)\n- [AI-augmented software development: Agentic AI for DevOps](https://about.gitlab.com/topics/agentic-ai/ai-augmented-software-development/)\n- [AI-driven code analysis: The new frontier in code security](https://about.gitlab.com/topics/agentic-ai/ai-code-analysis/)",[9,716,1181,738],{"featured":91,"template":693,"slug":1688},"gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops","content:en-us:blog:gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops.yml","Gitlab Duo Agent Platform What Is Next For Intelligent 
Devsecops","en-us/blog/gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops.yml","en-us/blog/gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops",{"_path":1694,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1695,"content":1701,"config":1706,"_id":1708,"_type":14,"title":1709,"_source":16,"_file":1710,"_stem":1711,"_extension":19},"/en-us/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes",{"title":1696,"description":1697,"ogTitle":1696,"ogDescription":1697,"noIndex":6,"ogImage":1698,"ogUrl":1699,"ogSiteName":706,"ogType":707,"canonicalUrls":1699,"schema":1700},"GitLab Duo + Amazon Q: Transform ideas into code in minutes","The new GitLab Duo with Amazon Q integration analyzes your issue descriptions and automatically generates complete working code solutions, accelerating development workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097127/Blog/Hero%20Images/Blog/Hero%20Images/Screenshot%202024-11-27%20at%204.55.28%E2%80%AFPM_4VVz6DgGBOvbGY8BUmd068_1750097126673.png","https://about.gitlab.com/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo + Amazon Q: Transform ideas into code in minutes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-04-28\",\n      }",{"title":1696,"description":1697,"authors":1702,"heroImage":1698,"date":1703,"body":1704,"category":10,"tags":1705},[733],"2025-04-28","Have you ever spent days or even weeks converting a complex issue into working code? We've all been there. You start with a solid idea and a clear set of requirements, but the path from that initial concept to deployable code can be frustratingly long. 
Your productivity gets bogged down in implementation details, and projects that should move quickly end up dragging on.\n\nThis is where the power of [agentic AI](https://about.gitlab.com/topics/agentic-ai/) capabilities comes in. [GitLab Duo with Amazon Q](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), which combines the comprehensive AI-powered DevSecOps platform with the deepest set of cloud computing capabilities, is designed to dramatically accelerate your application development process, all within your familiar GitLab workflow. By streamlining your path from idea to deployment, this powerful integration can propose implementation solutions based on your issue descriptions alone – transforming what used to take days into something that happens in minutes.\n\n## How it works: From issue to working code\n\nLet's walk through how this agentic AI feature works in practice. Imagine you're a developer tasked with creating a mortgage calculator application. Here's how GitLab Duo with Amazon Q helps you get it done:\n\n1. **Create an issue with detailed requirements:** Start by creating a standard [GitLab issue](https://docs.gitlab.com/user/project/issues/). In the description, you'll provide a comprehensive list of requirements that your service needs to meet. This becomes the blueprint for your solution.\n\n2. **Invoke Amazon Q with a quick action:** Once your issue is created, simply add a comment with a quick action, “/q dev”, to invoke Amazon Q. This is where the magic begins. \n\n3. **Let AI generate your implementation:** GitLab Duo with Amazon Q analyzes the issue description you've provided and the context of your source code, then autonomously generates code that meets all your stated requirements. 
It doesn't stop there – it actually commits those changes in a merge request, ready for your review.\n\n![GitLab Duo  with Amazon Q activity pop-up screenshot](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097156/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097156018.png)\n\n4. **Review the generated application**: Navigate into the merge request to review the generated code. You can verify that all your requirements have been met and make any necessary adjustments.\n\n5. **Test the proposed application**: Finally, check that the application runs successfully. With minimal effort on your part, you now have working code that implements your original requirements.\n\n## Improve your development process\n\nGitLab Duo with Amazon Q completely transforms this process, including dramatically decreasing the time it takes to carry out complex developer tasks, through intelligent automation. By leveraging an agentic AI approach, you can accelerate your path from idea to deployment, freeing development teams to focus on more strategic work.\n\nWith GitLab Duo and Amazon Q, you'll develop software faster, more efficiently, and with less manual coding effort. This integration helps you:\n\n* **Save valuable development time** by automating implementation based on requirements  \n* **Maintain consistency** in code generation across your projects  \n* **Reduce the cognitive load** of translating requirements into working code  \n* **Accelerate your release cycles** by removing implementation bottlenecks  \n* **Focus your expertise** on reviewing and optimizing, rather than writing boilerplate code\n\nReady to see GitLab Duo with Amazon Q in action? 
Watch our demo video to discover how you can transform your development workflow today.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/jxxzNst3jpo?si=j_LQdZhUnwqoQEst\" title=\"GitLab Duo with Amazon Q demo video for dev workflow\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n> To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).\n\n## GitLab Duo with Amazon Q resources\n\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/)\n- [GitLab and AWS partner page](https://about.gitlab.com/partners/technology-partners/aws/)\n- [GitLab Duo with Amazon Q documentation](https://docs.gitlab.com/user/duo_amazon_q/)",[9,835,737,496,738,233],{"slug":1707,"featured":91,"template":693},"gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes","content:en-us:blog:gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes.yml","Gitlab Duo Amazon Q Transform Ideas Into Code In Minutes","en-us/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes.yml","en-us/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes",{"_path":1713,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1714,"content":1720,"config":1726,"_id":1728,"_type":14,"title":1729,"_source":16,"_file":1730,"_stem":1731,"_extension":19},"/en-us/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant",{"title":1715,"description":1716,"ogTitle":1715,"ogDescription":1716,"noIndex":6,"ogImage":1717,"ogUrl":1718,"ogSiteName":706,"ogType":707,"canonicalUrls":1718,"schema":1719},"GitLab Duo Chat 101: Get more done on GitLab with our AI assistant","In this 
first article in our series learn how Chat can improve developer productivity – for example, by summarizing issues – and how to improve prompts to get better answers faster.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099126/Blog/Hero%20Images/Blog/Hero%20Images/GitLab_Duo_Blog_Hero_1800x945_r2_B%20%281%29_6a2UB7TOQk3JKxyb5yqYtc_1750099126039.png","https://about.gitlab.com/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Chat 101: Get more done on GitLab with our AI assistant\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2024-05-29\",\n      }",{"title":1715,"description":1716,"authors":1721,"heroImage":1717,"date":1723,"body":1724,"category":10,"tags":1725},[1722],"Abubakar Siddiq Ango","2024-05-29","GitLab Duo Chat became [generally available](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available/) in [GitLab 16.11](https://about.gitlab.com/releases/2024/04/18/gitlab-16-11-released/) and its power as a personal assistant can not be overstated. On a DevSecOps platform, more has to happen than just generating code; planning, discussions, security, compliance, and technical reviews are all critical to developing secure software faster. Issues, epics, merge requests, and other sections of GitLab are where this work happens, with knowledge often buried deep in comment threads. It can take a lot of time to get up to speed on these threads, especially when they've grown to hundreds of comments and interactions and when you've been away from them for a while. This is where GitLab Duo Chat can help.\n\nIn this first part of our GitLab Duo Chat 101 series, we'll introduce you to Chat's capabilities and then dig into how to use Chat to summarize comment threads.\n\n> Live demo! 
Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Register today](https://about.gitlab.com/seventeen/)!\n\n## GitLab Duo Chat's capabilities\n\nWith Chat, you can refactor [existing code](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#refactor-code-in-the-ide), learn how a [block of code works](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#explain-code-in-the-ide), and write [tests](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#write-tests-in-the-ide) for your code, learn about your issues and epics, and much more. Depending on your prompts, you can make Chat do impressive things that boost developer productivity. In the video below, I showcased how you can use GitLab Duo Chat to interact with GitLab and learn about your issues and epics.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/RJezT5_V6dI?si=XlXGs2DHAYa8Awzs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Use cases   \n\nGitLab Duo Chat’s capabilities allow for productivity gains across multiple parts of the software development lifecycle:\n\n- Product and project managers can use Chat’s issues and epics capabilities to gain insights into discussions and plan faster.\n- Developers can create solutions faster with code suggestions and refactoring capabilities. 
When it comes to working with legacy code or code from other team members, less time is spent on research with the ` /explain` capability providing the necessary insights to understand the code.\n- Quality assurance and test engineers can generate tests and check for vulnerabilities\n- New employees can get a better understanding of their code base and get started solving problems.\n- Beginner programmers can understand and pick up a language or framework quickly and create solutions with Chat providing next steps and insights.\n\n> Check out \"[10 best practices for using GitLab Duo Chat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/)\" for tips and tricks to craft AI prompts. \n\n## Summarizing issues\n\nWhen you encounter an issue, especially one with a lot of comments, you skim through the issue description, along with a couple of comments, but can't always get the complete picture of the conversations. GitLab Duo Chat can get you up to speed fast. In the image below, I asked Chat to summarize an issue along with a follow-up question. In two prompts, I got what I needed to understand what is going on in the issue without spending hours reading through the comments.\n\n![Chat summarizing an issue](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099137154.png)\n\nYou can use GitLab Duo Chat on the GitLab interface, as well as [the WebIDE, Visual Studio Code, and JetBrains interfaces](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#use-gitlab-duo-chat-in-the-web-ide).\n\n## Prompts and context\n\nGetting the best responses from any AI tool requires carefully crafting the questions asked. Sometimes, you need to give examples of the responses you expect to prime the large language models (LLMs) toward a desired response. 
Here are some areas to focus on to get desired responses.\n\n### Context\n\nHere are three prompts with similar objectives but worded differently:\n\n| Prompt 1     | Prompt 2     | Prompt 3    |\n| ---------- | ---------- | ---------- |\n| ![Prompt: Can you summarize this issue's description?](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099137154.png)       | ![Prompt: Can you provide a high-level summary of this issue?](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099137155.png)      | ![Prompt: Why is this issue popular?](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099137156.png)      |\n\nThe context, “this issue,” is common among the three prompts; this tells Chat what resource to use in looking for answers. Prompt 1 gives additional context on what to focus on: the description of the issue. Prompt 2 is not limited in its scope, which means the LLMs will spend more time going through the description and all the comments to provide a more detailed summary of the whole issue. (Note: As of the publication of this blog, there were more than 90 comments in that issue.) Prompt 3 got a poorer response because not much expectation was set for the type of response expected. \n\n[Low-context communication](https://handbook.gitlab.com/handbook/company/culture/all-remote/effective-communication/#understanding-low-context-communication) is critical in crafting your prompt for the best responses, as all information needed for the LLMs to provide an informed response is provided.\n\n### Simplicity\n\nThe wordiness of prompts can sometimes lead to incorrect or no responses. In the image below, you can see that rephrasing a prompt from “Customers have mentioned why this issue is important to them. 
Can you list the top 3 reasons they mentioned?” to “Why is this issue important to customers?” led to the expected response. When you don’t get the response you desire, simplifying or changing the words used in your prompt can improve the quality of responses.\n\n![Wordy Chat prompts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099137158.png)\n\n### Follow-up questions\n\nGitLab Duo Chat can have follow-up conversations – an essential capability. In the image below, I continued asking how the issue in question can be solved in GitLab's code along with a follow-up question asking for code samples.\n\n![Streamlined Chat prompt shown](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099137158.png)\n\nFollow-up questions allow the application to maintain context and provide faster responses. A recommendation is to provide parts of Chat’s previous responses in the next prompt. In the example above, I mentioned “Rails App,” as previously suggested. \n\n## Get started with GitLab Duo Chat \n\nGitLab Duo Chat does more than help you write better code, it helps you navigate through problems and quickly find solutions. With the right prompts and context, you can build secure software faster.\n\n> Want to try GitLab Duo Chat? 
[Start your free trial](https://about.gitlab.com/gitlab-duo/#free-trial) today.",[9,496,716,737],{"slug":1727,"featured":91,"template":693},"gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant","content:en-us:blog:gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant.yml","Gitlab Duo Chat 101 Get More Done On Gitlab With Our Ai Assistant","en-us/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant.yml","en-us/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant",{"_path":1733,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1734,"content":1739,"config":1744,"_id":1746,"_type":14,"title":1747,"_source":16,"_file":1748,"_stem":1749,"_extension":19},"/en-us/blog/gitlab-duo-chat-beta",{"title":1735,"description":1736,"ogTitle":1735,"ogDescription":1736,"noIndex":6,"ogImage":929,"ogUrl":1737,"ogSiteName":706,"ogType":707,"canonicalUrls":1737,"schema":1738},"Say hello to GitLab Duo Chat: A new level of AI-assisted productivity","Learn how GitLab Duo Chat, releasing Nov. 16 in Beta, can help elevate your coding skills, streamline onboarding, and supercharge team efficiency.","https://about.gitlab.com/blog/gitlab-duo-chat-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Say hello to GitLab Duo Chat: A new level of AI-assisted productivity\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Torsten Linz\"}],\n        \"datePublished\": \"2023-11-09\",\n      }",{"title":1735,"description":1736,"authors":1740,"heroImage":929,"date":1741,"body":1742,"category":10,"tags":1743},[1122],"2023-11-09","\n[GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html), the newest feature in the GitLab Duo suite of AI-assisted capabilities, helps teams write and understand code faster, get up to speed on the status of projects, and quickly learn GitLab. 
Chat will be available in Beta starting November 16.\n\nChat also will be included in our [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/) and [GitLab Workflow extension for VS Code](https://docs.gitlab.com/ee/editor_extensions/visual_studio_code/) as an experimental release. Get code explanations, generate tests, and create code right where your development work happens; no need for context switching. \n\nWe launched the [GitLab Duo suite](https://about.gitlab.com/gitlab-duo/) earlier this year to bolster software development teams' workflow, helping you deliver more secure software at an unprecedented pace and create more value for your customers. GitLab is the only platform that brings AI-powered planning tools, code creation, security scanning, and vulnerability remediation all into a single developer-friendly experience.\n\nChat will serve as the foundational technology driving our AI-powered GitLab Duo features, such as Vulnerability Summary and Root Cause Analysis. This will enable you to pose in-context follow-up questions for a more thorough investigation.\n\n> [Contact our sales team](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/) to get started with GitLab Duo Chat.\n\nIn our [Global DevSecOps Report: The State of AI in Software Development](https://about.gitlab.com/developer-survey/#ai), 83% of respondents said that implementing AI in their software development processes is essential to avoid falling behind, and they ranked chatbots among their top three AI use cases. Chat, which will be released for Ultimate tier customers as part of GitLab 16.6, is the perfect feature to help you maintain your competitive advantage. Here's why.\n\n## Designed to support everyone across the software development process \nFrom coding assistance to productivity tips, Chat provides real-time support for technical and non-technical users across the entire software development lifecycle. 
\n\n- **Inspiration on demand:** Need help determining the next best step in your workflow? Chat is ever-ready to ignite your ideation process.\n\n- **Elevate productivity:** Chat shoulders the burden of routine tasks so you can channel your energy into delivering value to your customers. \n\n- **Guidance at every step:** Whether you're a GitLab expert or a newcomer, Chat is your go-to coach, helping you become an expert in any process or feature.\n\nSometimes, there isn't enough time in the day, and even though you want to focus on important tasks, there are many little things you need to do. \n\n- **Code assistance:** Chat can assist in decoding the mysteries of unfamiliar code. It can explain,  propose tests, or simplify the code. You can also use Chat to write code from scratch interactively.\n\n![Chat in Web IDE](https://about.gitlab.com/images/blogimages/2023-11-09-chat-beta/chatbetaissueepicmanagement.gif){: .shadow}\n\n- **Issue and epic management:** Summarize an issue in seconds, turn comments into an issue description, or distill specific information from large epics easily. You can ask any question about the content of an epic or an issue. Plan work with issues and epics faster with the help of Chat. \n\n![Chat issue and epic management example](https://about.gitlab.com/images/blogimages/2023-11-09-chat-beta/chatinideexperimentalrelease.gif){: .shadow}\n\n- **Onboarding and learning made simple:** Whether you are onboarding to GitLab or you are already an expert learning how to use GitLab is streamlined with Chat.\n\n![Chat learning example](https://about.gitlab.com/images/blogimages/2023-11-09-chat-beta/chatbetareleaselearninglong.gif){: .shadow}\n\n## How your data stays your data  \nChat does not use your proprietary code or inputs to Chat as training data. This privacy-first approach includes both the prompt and the output of Chat. GitLab Duo uses the right large language models (LLMs) for each use case. 
For instance, Anthropic Claude-2 and Vertex AI Codey with text embedding-gecko LLMs power Chat. Our [publicly available documentation](https://docs.gitlab.com/ee/user/ai_features.html) describes all AI models GitLab Duo uses and [how it uses your data](https://docs.gitlab.com/ee/user/ai_features.html#data-privacy). \n\n## The road ahead for GitLab Duo  \t\nAs we continue to innovate and improve GitLab Duo, we're excited to share that our [Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) capability will transition from Beta to general availability later this year. We look forward to seeing the transformative impact GitLab Duo will have on your software development efforts. Learn more about GitLab Duo Chat in [our documentation](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) and [share your feedback and ideas](https://gitlab.com/gitlab-org/gitlab/-/issues/430124). \n\nTo get started with GitLab Duo Chat, please [contact our sales team](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/).\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[9,716,1181],{"slug":1745,"featured":6,"template":693},"gitlab-duo-chat-beta","content:en-us:blog:gitlab-duo-chat-beta.yml","Gitlab Duo Chat Beta","en-us/blog/gitlab-duo-chat-beta.yml","en-us/blog/gitlab-duo-chat-beta",{"_path":1751,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1752,"content":1758,"config":1763,"_id":1765,"_type":14,"title":1766,"_source":16,"_file":1767,"_stem":1768,"_extension":19},"/en-us/blog/gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements",{"title":1753,"description":1754,"ogTitle":1753,"ogDescription":1754,"noIndex":6,"ogImage":1755,"ogUrl":1756,"ogSiteName":706,"ogType":707,"canonicalUrls":1756,"schema":1757},"GitLab Duo Chat: Get to know productivity-boosting AI enhancements","Learn about Chat's new capabilities, including migration to Claude 3.5 Sonnet, new slash command helpers, and the integration of Root Cause Analysis and Explain Vulnerability features.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098629/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%281%29_77JeTV9gAmbXM0224acirV_1750098628882.png","https://about.gitlab.com/blog/gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Chat: Get to know productivity-boosting AI enhancements\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jannik Lehmann\"},{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-10-03\",\n      }",{"title":1753,"description":1754,"authors":1759,"heroImage":1755,"date":1760,"body":1761,"category":10,"tags":1762},[1219,1220],"2024-10-03","At GitLab, we [continuously strive to enhance your experience with GitLab Duo 
Chat]((https://gitlab.com/gitlab-org/gitlab/-/issues/430124)), our AI-powered assistant designed to streamline your development workflows. In this article, we share a series of significant updates that bring even more power, precision, and functionality to GitLab Duo Chat. \n\n## Migration to Claude 3.5 Sonnet\n\nWe are thrilled to announce a major upgrade for GitLab Duo Chat: [the migration of its underlying Large Language Model from Claude 3 Sonnet to the more advanced Claude 3.5 Sonnet](https://gitlab.com/gitlab-org/gitlab/-/issues/468334). This new model brings substantial performance enhancements, offering superior accuracy, context-awareness, and efficiency in AI-driven conversations.\n\nWith Claude 3.5 Sonnet powering GitLab Duo Chat, users can expect more precise and relevant responses. This upgrade ensures Chat remains at the forefront of AI technology, helping your team work more effectively in their daily workflows.\n\n![Screenshot of Chat in VS Code](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098636/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098636137.png)\n\nNotice the [code block syntax highlighting](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/1435) in Chat in VS Code.\n\n## Bringing the Slash Command Picker to Chat in the GitLab UI\n\nTo further improve the discovery of GitLab Duo Chat slash commands and make them more quickly accessible to our users, [we’ve introduced the Slash Command Picker UI](https://gitlab.com/gitlab-org/gitlab/-/issues/470703). Now, when you start typing a prompt with `/` in Chat in the GitLab UI, the available slash commands relevant to your current context will be automatically displayed. 
\n\nThis feature enhances your workflow and acts as the foundation for a growing platform of AI-powered capabilities that we plan to expand in the near future.\n\n![GitLab Duo Chat Slash Command Picker](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098636/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098636138.png)\n\n## Root Cause Analysis integration\n\nGitLab Duo Chat is gaining another powerful feature: [Root Cause Analysis](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#troubleshoot-failed-cicd-jobs-with-root-cause-analysis). \n\nThis integration allows you to maintain context within Chat while investigating failed pipeline jobs, making it easier to ask follow-up questions and explore the root causes of problems.\n\nYou can access Root Cause Analysis by clicking the \"Troubleshoot\" button at the end of the job log, or you can select the job log portions and then ask Chat with the `/troubleshoot` slash command. With this seamless integration, you have the tools you need to resolve issues more efficiently.\n\n![Root Cause Analysis example in Chat](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098636/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098636140.png)\n\n## Fix code in the IDE\n\nOne of the latest enhancements to GitLab Duo Chat is the ability to ask it to [fix selected code](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#fix-code-in-the-ide) within your IDE. This feature, introduced in GitLab 17.3, is available in the Web IDE, VS Code, and JetBrains IDE. 
It allows you to make specific code fixes by selecting portions of code and using the /fix slash command.\n\nFor example, you can instruct Chat to:\n- Fix grammar mistakes and typos with `/fix grammar mistakes and typos`.\n- Address performance issues using `/fix performance problems`.\n- Solve specific bugs or algorithm-related issues with commands like `/fix duplicate database inserts` or `/fix race conditions`.\n- Resolve code compilation errors with `/fix the build`.\n\nThis feature is designed to help developers quickly resolve common coding issues and improve the quality of their code, all while staying within their familiar IDE environment.\n\nHere is an example for fixing grammar mistakes and improving the language of (code) comments.\n\n![An example for fixing grammar mistakes](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098636/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098636142.png)\n\nHere is an example for fixing C code to print the disk usage. Chat correctly suggests missing header includes and provides more help to avoid additional bugs. The source code is available in [the GitLab Duo challenge project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/fix-c-cli-perf).\n\n![Chat enhancements - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098636/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098636144.png)\n\n## Explain Vulnerability now in GitLab Duo Chat\n\nAnother highly popular AI-powered feature, [Explain Vulnerability, has been integrated into GitLab Duo Chat](https://gitlab.com/groups/gitlab-org/-/epics/13309). This addition allows you to explore vulnerability details in depth while keeping your Chat context intact. You can ask follow-up questions and engage in more comprehensive discussions directly within the chat environment. 
You can access this feature by viewing a SAST vulnerability in your project’s Vulnerability Report.\n\nCurrently, this feature supports results from SAST scanners, including [Advanced SAST](https://docs.gitlab.com/ee/user/application_security/sast/gitlab_advanced_sast.html), with plans to extend support to additional scanners soon.\n\n![Sample Vulnerability Report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098636/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098636148.png)\n\n## What's next?\n\nWe're continuously improving GitLab Duo Chat. Some areas we're exploring include:\n- Context is important. We’re prioritizing the integration of [commits](https://gitlab.com/gitlab-org/gitlab/-/issues/468460), [pipeline jobs](https://gitlab.com/gitlab-org/gitlab/-/issues/468461), and [merge requests](https://gitlab.com/gitlab-org/gitlab/-/issues/464587) into Chat’s contextual scope. Additionally, we are looking into [terminal assistance with Chat](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/1423). This expansion will allow Chat to provide more informed and relevant responses based on a broader range of data.\n- Introduce the `/help` slash command. To make navigating Chat’s AI-powered features even more intuitive, we started development on [a /help slash command](https://gitlab.com/gitlab-org/gitlab/-/issues/462122). This new feature will guide users through the available commands and capabilities for easier and faster access to the tools you need.\n- Make Chat available in [supported IDEs](https://docs.gitlab.com/ee/user/gitlab_duo_chat/#supported-editor-extensions). You can follow the development work for Visual Studio in [this epic](https://gitlab.com/groups/gitlab-org/editor-extensions/-/epics/22). \n\nWe look forward to [hearing your feedback on these enhancements](https://gitlab.com/gitlab-org/gitlab/-/issues/430124). 
Stay tuned for more updates as we continue to evolve [GitLab Duo Chat](https://about.gitlab.com/gitlab-duo/).\n\n> Get started with GitLab Duo Chat today by [signing up for a free  trial](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?toggle=gitlab-duo-pro).",[9,716,717,738],{"slug":1764,"featured":6,"template":693},"gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements","content:en-us:blog:gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements.yml","Gitlab Duo Chat Get To Know Productivity Boosting Ai Enhancements","en-us/blog/gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements.yml","en-us/blog/gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements",{"_path":1770,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1771,"content":1777,"config":1782,"_id":1784,"_type":14,"title":1785,"_source":16,"_file":1786,"_stem":1787,"_extension":19},"/en-us/blog/gitlab-duo-chat-gets-agentic-ai-makeover",{"title":1772,"description":1773,"ogTitle":1772,"ogDescription":1773,"noIndex":6,"ogImage":1774,"ogUrl":1775,"ogSiteName":706,"ogType":707,"canonicalUrls":1775,"schema":1776},"GitLab Duo Chat gets agentic AI makeover  ","Our new Duo Chat experience, currently an experimental release, helps developers onboard to projects, understand assignments, implement changes, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099203/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2820%29_2bJGC5ZP3WheoqzlLT05C5_1750099203484.png","https://about.gitlab.com/blog/gitlab-duo-chat-gets-agentic-ai-makeover","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Chat gets agentic AI makeover  \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Torsten Linz\"}],\n        \"datePublished\": \"2025-05-29\",\n      
}",{"title":1772,"description":1773,"authors":1778,"heroImage":1774,"date":1779,"body":1780,"category":10,"tags":1781},[1122],"2025-05-29","Generative AI chat assistants have become standard in software development,\nhelping create and fix code just to start. But what if your chat assistant\ncould understand the artifacts of your entire development process, not just\nyour code? What if that chat assistant could help you work through issues\nand project documentation before it helps you write code, and could access\nCI/CD pipelines and merge requests to help you finish coding tasks\nproperly? \n\n\n**Meet the next generation of GitLab Duo Chat – GitLab Duo Agentic Chat, a\nsignificant evolution in AI-native development assistance and the newest\naddition to our platform, now in [experimental\nrelease](https://docs.gitlab.com/policy/development_stages_support/#experiment).**\nGitLab Duo Agentic Chat is currently available as an experimental feature in\nVS Code to all users on GitLab.com that have any one of these add-ons:\nDuo Core, Duo Pro, or Duo Enterprise.\n\n\nAgentic Chat transforms chat from traditional conversational AI to a chat\nexperience that takes action on your behalf, breaking down complex problems\ninto discrete tasks that it can complete. 
Instead of simply responding to\nquestions with the context you provide, Agentic Chat can:\n\n\n* **Autonomously determine** what information it needs to answer your\nquestions  \n\n* **Execute a sequence of operations** to gather that information from\nmultiple sources  \n\n* **Formulate comprehensive responses** by combining insights from across\nyour project  \n\n* **Create and modify files** to help you implement solutions\n\n\nAnd all of this is done while keeping the human developer within the loop.\n\n\nAgentic Chat is built on the Duo Workflow architecture, which is [currently\nin private\nbeta](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/).\nThe architecture comprises agents and tools that take on specific tasks like\nfinding the right context for a given question or editing files. \n\n\n**Use cases for GitLab Duo Agentic Chat**\n\n\nHere are some real-world and common use cases for Agentic Chat:\n\n\n* Onboard to new projects faster by having AI help you familiarize yourself\nwith a new codebase.\n\n\n* Jump into assigned work immediately, even when issue descriptions are\nunclear, because Agentic Chat can help you connect the dots between\nrequirements and existing implementations.\n\n\n* When it's time to make changes, Agentic Chat can handle the implementation\nwork by creating and editing multiple files across your project.\n\n\n* At release time, Agentic Chat can help you verify that your solution\nactually addresses the original requirements by analyzing your merge\nrequests against the initial issue or task.\n\n\n![agentic chat -\nexample](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099210/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099210429.png)\n\n\n\u003Ccenter>\u003Ci>Agentic Chat making code edits\u003C/i>\u003C/center>\n\n\n## From learning to shipping: A complete workflow demonstration in four\nsteps\n\n\nTo show how Agentic Chat transforms the 
development experience, let's walk\nthrough a real scenario from our engineering teams. Imagine you're a new\nteam member who's been assigned an issue but knows nothing about the\ncodebase. You can follow along with this video demonstration:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uG9-QLAJrrg?si=kaOhYylMIaWkIuG8j\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n**Step 1: Understand the project**\n\n\nInstead of manually exploring files and documentation, you can prompt\nAgentic Chat:\n\n\n```unset\n\nI am new to this project. Could you read the project structure and explain\nit to me?\n\n```\n\n\nAgentic Chat provides a comprehensive project overview by:  \n\n- Exploring the directory structure  \n\n- Reading README files and documentation  \n\n- Identifying key components and applications\n\n\n**Step 2: Understand your assigned task**\n\n\nNext, you need to understand your specific assignment, so you can enter this\nprompt:\n\n\n```unset\n\nI have been assigned Issue 1119. Could you help me understand this task,\nspecifically where do I need to apply the refactoring?\n\n```\n\n\nAgentic Chat explains the task and proposes a refactoring approach by:\n\n- Retrieving and analyzing the issue details from the remote GitLab server  \n\n- Examining relevant project files  \n\n- Identifying the specific locations requiring changes\n\n\n**Step 3: Implement the solution**\n\n\nRather than doing the work manually, you can request:\n\n\n```unset\n\nCould you make the edits for me? 
Please start with steps one, two, three.\n\n```\n\n\nAgentic Chat then:  \n\n- Creates new directories and files as needed \n\n- Extracts and refactors code across multiple locations  \n\n- Ensures consistency across all modified files  \n\n- Provides a summary of all changes made\n\n\n**Step 4: Verify completion**\n\n\nFinally, after creating your merge request, you can verify your work:\n\n\n```unset\n\nDoes my MR fully address Issue 1119? \n\n```\n\n\nAgentic Chat confirms whether all requirements have been met by analyzing\nboth your merge request and the original issue.\n\n\n## Try it today and share your feedback\n\n\nGitLab Duo Agentic Chat is currently available as an experimental feature in VS Code to all users on GitLab.com that have any one of these add-ons: Duo Core, Duo Pro, or Duo Enterprise. See our [setup\ndocumentation](https://docs.gitlab.com/user/gitlab_duo_chat/agentic_chat/)\nfor prerequisites and configuration steps.\n\n\nAs an experimental feature, Agentic Chat has some known limitations we're\nactively addressing, including slower response times due to multiple API\ncalls, keyword-based rather than semantic search, and limited support for\nnew local folders or non-GitLab projects. **Your feedback is crucial in\nhelping us prioritize improvements and bring Agentic Chat to general\navailability so please share your experience in [this\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/542198).**\n\n\n## What's next?\n\n\nWe are fully focused on improving Agentic Chat, including bringing it to\ngeneral availability. In the meantime, we are aiming to improve response\ntimes and are adding capabilities that GitLab Duo Chat currently has, such\nas using self-hosted models or supporting JetBrains and Visual Studio in\naddition to VS Code. Once we have switched Duo Chat to this new architecture\nwe plan to also bring Agentic Chat to the chat in the GitLab web\napplication. 
We also plan to add a lot more functionality, such as editing\nGitLab artifacts, supporting context from custom Model Context Protocol, or\nMCP, servers, and offering commands to run in the terminal.\n\n\n> Ready to experience autonomous development assistance but not yet a GitLab\ncustomer? Try Agentic Chat today as part of [a free trial of GitLab\nUltimate with Duo Enterprise](https://about.gitlab.com/free-trial/) and help\nshape the future of AI-powered development. Follow these [setup steps for VS\nCode](https://docs.gitlab.com/user/gitlab_duo_chat/agentic_chat/#use-agentic-chat-in-vs-code).\n\n>\n\n> And make sure to join the GitLab 18 virtual launch event to learn about\nour agentic AI plans and more. [Register\ntoday!](https://about.gitlab.com/eighteen/)\n\n\n***Disclaimer: This blog contains information related to upcoming products,\nfeatures, and functionality. It is important to note that the information in\nthis blog post is for informational purposes only. Please do not rely on\nthis information for purchasing or planning purposes. As with all projects,\nthe items mentioned in this blog and linked pages are subject to change or\ndelay. 
The development, release, and timing of any products, features, or\nfunctionality remain at the sole discretion of GitLab.***\n\n\n## Learn more\n\n\n- [GitLab Duo Workflow: Enterprise visibility and control for agentic\nAI](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/)\n\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n\n- [Agentic AI guides and\nresources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)\n",[9,1181,717,496,738,737],{"slug":1783,"featured":91,"template":693},"gitlab-duo-chat-gets-agentic-ai-makeover","content:en-us:blog:gitlab-duo-chat-gets-agentic-ai-makeover.yml","Gitlab Duo Chat Gets Agentic Ai Makeover","en-us/blog/gitlab-duo-chat-gets-agentic-ai-makeover.yml","en-us/blog/gitlab-duo-chat-gets-agentic-ai-makeover",{"_path":1789,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1790,"content":1796,"config":1801,"_id":1803,"_type":14,"title":1804,"_source":16,"_file":1805,"_stem":1806,"_extension":19},"/en-us/blog/gitlab-duo-chat-now-generally-available",{"title":1791,"description":1792,"ogTitle":1791,"ogDescription":1792,"noIndex":6,"ogImage":1793,"ogUrl":1794,"ogSiteName":706,"ogType":707,"canonicalUrls":1794,"schema":1795},"GitLab Duo Chat, your at-the-ready AI assistant, is now generally available","Explore a real-world example of how our versatile conversational interface for GitLab Duo helps developers onboard faster and write secure code more efficiently.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671806/Blog/Hero%20Images/GitLab-Duo-Chat-Thumbnail.png","https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Chat, your at-the-ready AI assistant, is now generally available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Torsten 
Linz\"},{\"@type\":\"Person\",\"name\":\"Laurena Alves\"}],\n        \"datePublished\": \"2024-04-18\",\n      }",{"title":1791,"description":1792,"authors":1797,"heroImage":1793,"date":1798,"body":1799,"category":10,"tags":1800},[1122,934],"2024-04-18","GitLab Duo Chat is now generally available in GitLab 16.11, bringing a broad range of AI features together into a single easy-to-use, natural language chat experience. Chat provides real-time guidance for engineering and non-engineering users across the entire software development lifecycle — supporting teams in a wide range of tasks, from understanding code faster and boosting collaboration to quickly learning how to use GitLab.\n\nToday, we are also making the following GitLab Duo capabilities, which are accessible in Chat, generally available:\n- **Code explanation** helps developers understand unfamiliar code.\n- **Code refactoring** enables developers to improve and modernize existing code.\n- **Test generation** automates the writing of tests for functions and methods and helps teams catch bugs sooner.\n\nGitLab Duo Chat is available within the GitLab user interface, including GitLab’s Web IDE, as well as within popular IDEs, including VS Code and the JetBrains suite of IDEs. Chat can also be extended by adding custom tools to help users more accurately complete tasks unique to their organization.\n\nGitLab's [2023 State of AI in Software Development Report](https://about.gitlab.com/developer-survey/#ai) found that developers spend 75% of their time on tasks other than writing code, including understanding and improving existing code, maintaining code, and testing — and AI, when leveraged across the entire software development lifecycle, will drive efficiencies across all these areas. The report also found that over three-quarters of DevSecOps professionals said they are concerned about AI tools having access to private information or intellectual property. 
By providing teams with a single entry point into AI for the entire software development process, Chat helps teams boost efficiency without sacrificing security, privacy, or code quality. \n\nIn this blog, we’ll explore an example of how you can use Chat to jump into a new project and immediately start adding value.\n\n## Get up to speed, fast\nAs a developer during onboarding, you can ask Chat for help with general knowledge such as understanding CI/CD, the difference between an issue and an epic, how to reset your GitLab password, or how to get started with popular frameworks. With Chat, you have an assistant ready to answer all of your onboarding questions, and soon you’re ready to dig into your first project.\n\n\u003Ccenter>\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175955/Blog/tottp2iwt85d7grkytpc.png\" alt=\"Chat GA blog CI/CD explanation\" width=\"300\">\n\u003C/figure>\n\u003C/center>\n\u003Cp>\u003C/p>\n\n## Accelerate development\nYour first task is to help the Product team update the product UI to include totals for the prices of all products in the inventory.\n\nTo better understand where to add the new functionality, you can ask Chat to explain a portion of the application and get a quick, easy-to-understand explanation of how the code works — without context-switching. Then, as you start developing the new feature, you can use GitLab Duo Code Suggestions to write code more efficiently. GitLab Duo lets you stay in flow by predictively completing code blocks and proposing common code, all in the same environment where you’re already coding.\n\nFinally, you spot a few lines of code that can be simplified, so you ask Chat to refactor the code. 
After just a few minutes, you have a better understanding of how your application works — and you have new, high-quality code to implement the functionality the Product team is looking for.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/933806295?autoplay=1&loop=1&autopause=0&background=1&muted=1\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Help Center Video\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\n## Stay secure\nBefore you merge any of your new code, you’ll need to run some tests to ensure everything works as expected. You can ask Chat to create unit tests for a Java class you modified. You then paste the unit tests into a new file. You notice that the vulnerability scanner for infrastructure as code has not been enabled, so you ask Chat about it and quickly append the scanner to the project pipeline.\n\nNow you can merge your code with confidence.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/933806352?autoplay=1&loop=1&autopause=0&background=1&muted=1\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Help Center Video\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\n> Explore tips and tricks for [integrating GitLab Duo Chat into your AI-powered DevSecOps workflows](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/).\n\n## Adopt AI with guardrails\nLuckily, while you’ve been busy onboarding, getting up to speed on your first project, and merging your first lines of code, your organization can rest assured that Chat is 
built with privacy in mind and doesn't use customer data to train AI models. In addition, GitLab Duo AI access controls enable organizations to control sensitive data at the project, sub-group, and group levels by enabling or disabling AI read access.\n\n> Visit the [GitLab AI Transparency Center](https://about.gitlab.com/ai-transparency-center/) to explore how GitLab implements governance and transparency in GitLab Duo.\n\n## Get started with GitLab Duo Chat today\nWhether you’re a developer or you’re managing the entire team, GitLab Duo Chat can empower you to take advantage of AI exactly where you need it throughout the software development lifecycle — all while helping you maintain code quality and security guardrails. GitLab Duo Pro, including code explanation, code refactoring, and test generation, is now available to GitLab Premium and Ultimate customers for $19 USD per user per month.\n\n![GitLab Duo Pro price card](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677968/Blog/Content%20Images/GitLab_Pricing_Guide_PRO_820px_B.png)\n\n> [Start a free trial of GitLab Duo and get started using Chat today.](https://about.gitlab.com/gitlab-duo/#free-trial)\n\n## What’s next for GitLab Duo \nWe are building [GitLab Duo](https://about.gitlab.com/gitlab-duo/) to empower your teams to develop software faster, secure applications more efficiently, better manage software pipelines, and enhance team collaboration. We are excited to share that several GitLab Duo features currently in Beta will soon be released into general availability. 
Key enhancements include improved AI-powered security capabilities, such as detailed vulnerability explanations and automated resolution processes, as well as summarization and templating tools to help teams improve collaboration in issues, merge requests, and code reviews with concise AI-powered summaries and auto-population of templates.\n\nIn line with our privacy-first approach to AI, we will also continue to introduce new ways for organizations to tailor GitLab Duo to their specific needs with two upcoming capabilities:\n\n- **Model personalization** will enable organizations to leverage the full potential of AI in a way that aligns closely with their strategic goals, operational needs, and customer expectations.\n- **Self-hosted model deployment** will help organizations ensure that data doesn't leave their secure environment, reducing the risk of breaches and ensuring compliance with data protection regulations.\n\n> Visit the [GitLab Duo documentation](https://docs.gitlab.com/ee/user/ai_features.html) for a complete list of generally available, Beta, and Experimental features.",[9,738,496,1181],{"slug":1802,"featured":91,"template":693},"gitlab-duo-chat-now-generally-available","content:en-us:blog:gitlab-duo-chat-now-generally-available.yml","Gitlab Duo Chat Now Generally Available","en-us/blog/gitlab-duo-chat-now-generally-available.yml","en-us/blog/gitlab-duo-chat-now-generally-available",{"_path":1808,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1809,"content":1814,"config":1819,"_id":1821,"_type":14,"title":1822,"_source":16,"_file":1823,"_stem":1824,"_extension":19},"/en-us/blog/gitlab-duo-code-suggestions-is-generally-available",{"title":1810,"description":1811,"ogTitle":1810,"ogDescription":1811,"noIndex":6,"ogImage":929,"ogUrl":1812,"ogSiteName":706,"ogType":707,"canonicalUrls":1812,"schema":1813},"GitLab Duo Code Suggestions is generally available","Learn how our AI-powered workflow helps developers write secure code 
efficiently.\n","https://about.gitlab.com/blog/gitlab-duo-code-suggestions-is-generally-available","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Code Suggestions is generally available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2023-12-22\",\n      }",{"title":1810,"description":1811,"authors":1815,"heroImage":929,"date":1816,"body":1817,"category":10,"tags":1818},[1549],"2023-12-22","GitLab Duo Code Suggestions, part of the GitLab Duo suite of AI-powered workflows, is now generally available with GitLab 16.7. [Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/), our generative AI code creation assistant within our DevSecOps platform, helps developers to write secure code more efficiently and assists in improving cycle times by taking care of repetitive, routine coding tasks.\n\nAccording to GitLab's [2023 State of AI in Software Development](https://about.gitlab.com/developer-survey/#ai) report, 83% of DevSecOps professionals said it is essential to implement AI in their software development processes to avoid falling behind, and a majority were interested in using AI for code generation and code suggestions.\n\nAs DevSecOps teams incorporate AI in the software development lifecycle, tapping into easy-to-adopt features like Code Suggestions provides a good entry point to achieve improved efficiency, accuracy, and productivity while not compromising on security and governance.\n\n> [Try Code Suggestions for free](http://about.gitlab.com/solutions/code-suggestions/sales) through February 14.\n\n## Faster development with less context switching\n\nA developer's workload is more than just writing code; it involves extensive context switching to search through documentation, hunt for code examples, and work through trial and error. 
All of this interrupts the software development process, decreasing time to value.\n\nLeveraging generative AI, Code Suggestions helps boost developers' efficiency and effectiveness by assisting in reducing the time required for coding fundamental functions. It also helps in understanding and extending existing, and sometimes unfamiliar, codebases while helping ensure adherence to security best practices. Code Suggestions includes the following capabilities:\n\n- **Code generation:** automatically generates lines of code, including full functions, from single and multi-line comments as well as comment blocks \n- **Code completion:** automatically proposes new lines of code from a few typed characters\n\nCode Suggestions is available in 15 languages, including C++, C#, Go, Java, JavaScript, Python, PHP, Ruby, Rust, Scala, Kotlin, and TypeScript. GitLab editor extensions can be found in popular IDE marketplaces; VS Code, Visual Studio, JetBrains’ suite of IDEs, and Neovim are all supported. And, of course, Code Suggestions is available within GitLab’s Web IDE, giving developers a quick way to get up and running with GitLab Duo. Support for Code Suggestions is available for self-managed GitLab instances via a secure connection to GitLab cloud infrastructure.\n\nWatch this introduction to Code Suggestions:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ds7SG1wgcVM?si=9J9gX0qs5De2NXUC\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Discover more AI capabilities with GitLab Duo\n\nOrganizations can use AI to help streamline the entire DevSecOps lifecycle and ship better, more secure, software faster. GitLab Duo has 15 AI-assisted features that support everyone involved in software development. From planning and coding to testing to delivery, there's a [GitLab Duo](https://about.gitlab.com/gitlab-duo/) capability to help. 
\n\nFor example, [GitLab Duo Vulnerability Resolution](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#vulnerability-resolution) helps teams remediate vulnerabilities proactively with the assistance of generative AI. In addition, Discussion Summary assists in getting everyone up to speed and aligned on lengthy conversations within [GitLab Enterprise Agile Planning](https://about.gitlab.com/blog/gitlab-enterprise-agile-planning-add-on-for-all-roles/).\n\nOur approach to AI is resonating with our customers. For example, Amado Gramajo, Vice President of Infrastructure & DevOps at Nasdaq, recently shared his excitement about how GitLab Duo will help Nasdaq protect their intellectual property and stay in line with regulatory mandates.\n\nGitLab is the only platform that integrates AI throughout the entire software development lifecycle. As developers become more effective, GitLab helps security and operations team members to keep pace. GitLab has a privacy- and transparency-first approach to AI and [does not use customer code to train AI models](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html#code-suggestions-data-usage). \n\n## Get started with Code Suggestions today\n\nCode Suggestions, which can be trialed for free from December 21 through February 14 (subject to [GitLab’s Testing Agreement](https://handbook.gitlab.com/handbook/legal/testing-agreement/)), is available as an add-on to GitLab subscriptions for an introductory price of $9 USD per user/month. 
[Contact us today](http://about.gitlab.com/solutions/code-suggestions/sales) to get started with Code Suggestions.\n",[9,1181,716],{"slug":1820,"featured":91,"template":693},"gitlab-duo-code-suggestions-is-generally-available","content:en-us:blog:gitlab-duo-code-suggestions-is-generally-available.yml","Gitlab Duo Code Suggestions Is Generally Available","en-us/blog/gitlab-duo-code-suggestions-is-generally-available.yml","en-us/blog/gitlab-duo-code-suggestions-is-generally-available",{"_path":1826,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1827,"content":1833,"config":1838,"_id":1840,"_type":14,"title":1841,"_source":16,"_file":1842,"_stem":1843,"_extension":19},"/en-us/blog/gitlab-duo-code-suggestions-python",{"title":1828,"description":1829,"ogTitle":1828,"ogDescription":1829,"noIndex":6,"ogImage":1830,"ogUrl":1831,"ogSiteName":706,"ogType":707,"canonicalUrls":1831,"schema":1832},"How GitLab Duo Code Suggestions helped me make long car rides fun","AI-powered Gitlab Duo Code Suggestions coupled with Remote Development workspaces helped me create a Python-based guessing game application to keep my daughter entertained.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679869/Blog/Hero%20Images/carride.jpg","https://about.gitlab.com/blog/gitlab-duo-code-suggestions-python","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab Duo Code Suggestions helped me make long car rides fun\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-08-03\",\n      }",{"title":1828,"description":1829,"authors":1834,"heroImage":1830,"date":1835,"body":1836,"category":10,"tags":1837},[774],"2023-08-03","\nOn long car rides with my daughter I have two options: Let her watch a movie or continuously play a game with her where she tries to guess a random number in my head. 
I often opt to play the game, but recently, my energy and patience weren't at their peak. This led to a moment of inspiration. Why not develop a simple application that my daughter could play on her own? The only hurdle was that I haven't developed an application in years, and, since then, new languages such as Python have emerged. \n\nWith the help of the AI-powered feature [GitLab Duo Code Suggestions (Beta)](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html), I was able to quickly develop a Python application, despite having no prior knowledge of the programming language.\n\n## How I built the guessing game application\nMy application development journey began by launching the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/) powered by VS Code. From there, I navigated to Preferences and [activated Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-for-an-individual-user), paving the way for my AI-assisted coding session.\n\nThanks to Code Suggestions, I had the flexibility to input code or compose comments in plain text English, and receiving automated code suggestions while typing. For instance, I included only these three comments, ultimately utilizing them to construct the entire application:\n- store a random number between 0 to 10 \n- print a welcome message to Yael, ask to guess a number between 0 to 10 \n- create a loop that will continue until the user guesses the number \n\nTo my surprise, not only did the code suggestions align with my comments, but they also grasped the underlying concept of my application, offering additional code without explicit comments. 
After making a few minor adjustments, here is the final code for the game:\n\n```python\nimport random\n\n# Store a random number between 0 to 10\nnumber = random.randint(0, 10)\ncounter = 0\n# Print a welcome message to Yael, ask to guess a number between 0 to 10\n\nprint(\"Welcome to the guessing game, Yael!\")\nprint(\"I am thinking of a number between 0 and 10, can you guess what it is?\")\n\n# Create a loop that will continue until the user guesses the number\n\nwhile True:\n   # Get the user's guess\n   guess = int(input(\"Guess my number: \"))\n   counter += 1\n\n\n  # Check if the user's guess is correct\n   if guess == number:\n       # If the guess is correct, print a message and break out of the loop\n       print(\"You guessed it! My number was\", number, \"and it took you\", counter, \"guesses.\")\n       break\n  \n   elif    guess \u003C number:\n       print(\"Your guess is too low.\")\n   else:\n       print(\"Your guess is too high.\")\n\n```\n\nWith the assistance of Code Suggestions, I was able to navigate the intricacies of Python coding, step by step. The suggested code not only aligned perfectly with my intentions, but also expanded my understanding of the programming language, enabling me to build a functional game. \n\nAfter thoroughly testing the guessing game application using the debugging tool in VS Code, I was delighted to find that it worked flawlessly! However, a new challenge arose: How could I make this game accessible to my daughter while in the car?\n\n## How to leverage GitLab Remote Development workspaces Beta\nIf you have young children, you're likely familiar with their constant need for instant gratification. 
To satisfy my daughter's desire to play the new game on her iPad right away, I needed a solution.\n\nSince the game wasn't available as a mobile or web application, I decided to utilize the power of [GitLab Remote Development workspaces](/blog/quick-start-guide-for-gitlab-workspaces/) to create a mobile environment for her.\n\nThe workspace is a temporary development environment hosted in the cloud, which offers a simple setup process and numerous advantages for developers. Now, you might wonder how this is relevant to our topic. Well, Remote Development workspaces provides a link to access the environment. This became my workaround to allow her to start playing the game immediately within that development environment directly from her iPad.\n\nThis strategy turned out to be the perfect workaround, not only allowing her to enjoy the game but also exposing her to the captivating world of programming.\n\n## Understanding beta features\nWhile my journey of developing a game in Python, with the help of Code Suggestions, has been incredibly valuable, it's important to acknowledge that the feature is currently in its beta phase. As is common with beta features, there are certain considerations to keep in mind. Due to the high demand and ongoing improvements, there may be occasional unscheduled downtime and potential delays in receiving Code Suggestions within IDEs. Additionally, it's worth noting that the suggestions generated by Code Suggestions may occasionally be of lower quality or incomplete. As Beta users, it is crucial to familiarize yourself with the [documented limitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html).\n\n## Demo\nThis [click-through demo](https://go.gitlab.com/HplUKw) showcases how I used Code Suggestions to develop the guessing game application. I encourage you to give Code Suggestions a try today as you will have a lot of fun.\n\n## We are looking for your feedback! 
\nFeedback from Beta users of Code Suggestions is invaluable. The GitLab team eagerly awaits your input, which will play an important role in further enhancing this feature and refining its capabilities. Together, we can shape the future of Code Suggestions and make it even more powerful and reliable. To send feedback, or report on issues, use the [Code Suggestions feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/405152). \n\n",[1181,717,9,716],{"slug":1839,"featured":6,"template":693},"gitlab-duo-code-suggestions-python","content:en-us:blog:gitlab-duo-code-suggestions-python.yml","Gitlab Duo Code Suggestions Python","en-us/blog/gitlab-duo-code-suggestions-python.yml","en-us/blog/gitlab-duo-code-suggestions-python",{"_path":1845,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1846,"content":1852,"config":1857,"_id":1859,"_type":14,"title":1860,"_source":16,"_file":1861,"_stem":1862,"_extension":19},"/en-us/blog/gitlab-duo-enterprise-is-now-available",{"title":1847,"description":1848,"ogTitle":1847,"ogDescription":1848,"noIndex":6,"ogImage":1849,"ogUrl":1850,"ogSiteName":706,"ogType":707,"canonicalUrls":1850,"schema":1851},"GitLab Duo Enterprise is now available","Organizations have an end-to-end AI partner for faster, more secure software development. 
Learn how GitLab Duo Enterprise supports the entire DevSecOps lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665660/Blog/Hero%20Images/Untitled__1800_x_945_px_.png","https://about.gitlab.com/blog/gitlab-duo-enterprise-is-now-available","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Enterprise is now available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2024-09-03\",\n      }",{"title":1847,"description":1848,"authors":1853,"heroImage":1849,"date":1854,"body":1855,"category":10,"tags":1856},[1549],"2024-09-03","[GitLab Duo Enterprise]( https://about.gitlab.com/gitlab-duo/), now available, is an end-to-end AI partner designed for the entire software development lifecycle. This powerful suite of AI tools is designed to boost developer productivity, enhance security, streamline collaboration, and accelerate your DevSecOps processes.\n\nKey features at a glance:\n- Intelligent code assistance across 25+ programming languages\n- AI-powered security vulnerability details and resolution\n- Automated test generation and root cause analysis\n- Team collaboration enhancements with AI-driven summaries\n- ROI quantification through an AI Impact Dashboard\n\n## Why we developed GitLab Duo Enterprise\n\nAs organizations aim to deliver better software faster and create customer value, they encounter significant challenges that slow their progress. [Our research](http://about.gitlab.com/developer-survey/2024/ai) shows that 95% of organizations are either evaluating or using AI in the software development lifecycle. 
However, 55% of survey respondents said they feel using AI for software development is risky.\n\nCommon pain points in the enterprise include suboptimal developer experience and productivity, increasing security and compliance demands, inefficient collaboration across teams, and difficulty in assessing the ROI of AI technology investments. GitLab Duo Enterprise addresses these challenges head-on, providing a secure, efficient, and powerful AI partner for your development teams. \n\n**Let's explore how GitLab Duo Enterprise can transform the way your company creates and deploys software.**\n\n## Boost developer productivity with intelligent code assistance\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1004252678?h=83f35171b6&amp;badge=0&amp;badge=0&amp?autoplay=1&loop=1&autopause=0&background=1&muted=1\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Code Suggestions clip\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\nOne of the primary hurdles in software development is the time-consuming nature of routine coding tasks. Get to the most impactful work faster with:\n\n- __Code Suggestions__ supports more than 25 programming languages. This AI-powered tool accelerates code creation, improves code quality, and reduces the time spent on boilerplate tasks.\n\nBut it's not just about writing new code. \n\n- GitLab Duo Enterprise's __Code Explanation__ capability enables developers to quickly understand complex or unfamiliar code, while \n\n- **Code Refactoring** enables developers to [improve and modernize existing code](https://about.gitlab.com/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo/). \n\n- __Test Generation__ automates the creation of comprehensive unit tests. The result? 
Developers can focus on high-value tasks that drive innovation, leading to faster development cycles and improved software quality.\n\n> See how [European tech company Cube](https://about.gitlab.com/customers/cube/) uses Code Suggestions, Test Generation, and other GitLab Duo features to achieve improvements in speed and efficiency. \n\n## Enhance team collaboration and communication\n\nEffective collaboration is the cornerstone of successful software development, yet it's often hindered by lengthy discussions, complex merge requests, and time-consuming code reviews. GitLab Duo Enterprise addresses these challenges with its suite of summarization and templating tools:\n- __Discussion Summary:__ Allows team members to quickly get up to speed on lengthy conversations in issues\n- __Merge Request Summaries:__ Provide clear, concise overviews of proposed changes. \n- __Code Review Summaries:__ Streamline the review process, enabling better handoffs between authors and reviewers. \n\nBy facilitating clearer communication and faster decision-making, GitLab Duo Enterprise helps teams work more efficiently and deliver results more quickly.\n\n## Streamline troubleshooting and debugging\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1004252688?h=fc6c048bfd&amp;badge=0&amp;badge=0&amp?autoplay=1&loop=1&autopause=0&background=1&muted=1\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Root Cause Analysis clip\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\nWhen development pipelines fail, the impact on project timelines can be significant. GitLab Duo Enterprise's __Root Cause Analysis__ feature is a game-changer here. 
By automatically analyzing logs and providing detailed explanations of failures along with potential fixes, Root Cause Analysis significantly reduces the time spent on troubleshooting.\n\nThe benefits extend beyond just time savings. With [faster resolution of CI/CD build issues](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/), teams can maintain momentum, reduce downtime, and ultimately deliver software updates more frequently and reliably.\n\n## Elevate security across the development lifecycle\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1004252706?h=73e568b89c&amp;badge=0&amp;badge=0&amp?autoplay=1&loop=1&autopause=0&background=1&muted=1\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Vulnerability Explanation and Resolution clip\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\nCybersecurity threats are ever-present, so robust application security is a necessity. GitLab Duo Enterprise rises to this challenge with its __Vulnerability Explanation__ and __Vulnerability Resolution__ features. 
These AI-powered tools help [developers fully understand security vulnerabilities](https://about.gitlab.com/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities/) and then automatically generate merge requests with suggested fixes.\n\n## Quantify AI impact for strategic decision-making\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1004252663?h=d35106288b&amp;badge=0&amp?autoplay=1&loop=1&autopause=0&background=1&muted=1\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"AI Impact Dashboard clip\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\u003Cp>\u003C/p>\n\nDemonstrating the ROI of technology investments is crucial. GitLab Duo Enterprise addresses this need head-on with its __AI Impact Dashboard__. This analytics tool, built on top of Value Stream Analytics and DORA4 metrics, provides clear metrics on cycle time improvements and increased deployment frequencies, allowing organizations to quantify the tangible benefits of AI adoption in their development processes.\n\nBy offering insights into how AI usage correlates with key productivity metrics, the [AI Impact Dashboard](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/) empowers leadership to make data-driven decisions about resource allocation and strategic technology investments.\n\n## Embrace the future of AI-powered DevSecOps\n\nAs we unveil GitLab Duo Enterprise, we're proud to announce that GitLab has been recognized as a Leader in the inaugural [Gartner® Magic Quadrant™ for AI Code Assistants](https://about.gitlab.com/gartner-mq-ai-code-assistants/). 
This recognition underscores our commitment to delivering AI solutions that drive real business value.\n\nThe future of software development is here, and it's powered by AI. We're here to help you incorporate intelligent, scalable AI throughout the DevSecOps lifecycle so you can deliver results faster for your customers.\n\n> [Get started today with GitLab Duo Enterprise with a free trial!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro)\n",[9,738,496,717,1181],{"slug":1858,"featured":91,"template":693},"gitlab-duo-enterprise-is-now-available","content:en-us:blog:gitlab-duo-enterprise-is-now-available.yml","Gitlab Duo Enterprise Is Now Available","en-us/blog/gitlab-duo-enterprise-is-now-available.yml","en-us/blog/gitlab-duo-enterprise-is-now-available",{"_path":1864,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1865,"content":1870,"config":1877,"_id":1879,"_type":14,"title":1880,"_source":16,"_file":1881,"_stem":1882,"_extension":19},"/en-us/blog/gitlab-duo-pro",{"title":1866,"description":1867,"ogTitle":1866,"ogDescription":1867,"noIndex":6,"ogImage":929,"ogUrl":1868,"ogSiteName":706,"ogType":707,"canonicalUrls":1868,"schema":1869}," GitLab Duo Pro combines Code Suggestions, Chat, and organizational controls","AI-powered code completion, code generation, chat, and per-user assignment come to the GitLab DevSecOps Platform as a secure add-on.","https://about.gitlab.com/blog/gitlab-duo-pro","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \" GitLab Duo Pro combines Code Suggestions, Chat, and organizational controls\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Hillary Benson\"},{\"@type\":\"Person\",\"name\":\"Dave Steer\"}],\n        \"datePublished\": \"2024-01-17\",\n      }",{"title":1866,"description":1867,"authors":1871,"heroImage":929,"date":1874,"body":1875,"category":10,"tags":1876},[1872,1873],"Hillary 
Benson","Dave Steer","2024-01-17","> __This blog has been updated to include GitLab Duo Chat (in Beta) as part of the GitLab Duo Pro add-on.__\n\nLast year, we launched GitLab Duo Code Suggestions into general availability as the cornerstone of our GitLab Duo suite of AI capabilities that power DevSecOps workflows. We received incredibly positive feedback from our customers on the ease of use and effectiveness of Code Suggestions' code completion and code generation capabilities. \n\nNow, we are introducing [GitLab Duo Pro](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/), a new package that brings together Code Suggestions, Chat, and organizational control capabilities, ensuring that teams can take advantage of AI exactly where they need it throughout the software development lifecycle.\n\n> [Get started with GitLab Duo Pro](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/) \n\nGitLab Duo Pro, which is available to Ultimate and Premium customers as an add-on, boosts developer efficiency and effectiveness by decreasing the time required to write and understand code. 
With GitLab Duo Pro, organizations can set up their DevSecOps teams for success by giving them the AI-assisted tools they need to develop secure code faster, improve collaboration, and reduce the security and compliance risks of AI adoption.\n\nGitLab Duo Pro features:\n- Code Suggestions: AI-powered code completion and code generation\n- Chat (Beta): Provides real-time guidance on coding, refactoring, and test generation\n- Privacy-first approach: Your code stays your code — GitLab does not use it for training or fine-tuning AI models\n- Organizational controls: GitLab Duo Pro enables organizations to have greater control over AI by limiting AI usage to approved users only\n\nGitLab Duo Pro is available across SaaS, self-managed, and Dedicated deployments.\n\n## What is GitLab Duo Pro?\n\nWith GitLab Duo Pro, developers can access [Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) to generate blocks of code from single- and multi-line comments as well as comment blocks. Code Suggestions also autocompletes lines of code from a few typed characters — improving cycle times by securely taking care of repetitive, routine coding tasks.\n\nGitLab Duo Pro also includes [Chat](https://about.gitlab.com/blog/gitlab-duo-chat-beta/) (Beta), which is helpful for technical and non-technical users across the entire software development lifecycle. Chat assists in explaining unfamiliar code, suggesting and generating tests, and simplifying code. You can also use Chat to write code from scratch interactively. Whether you are onboarding to GitLab or you are already an expert, learning how to use GitLab is streamlined with Chat.\n\nOrganizational controls are provided in GitLab Duo Pro for better management of AI capabilities, including per-user assignment, so only approved users can use AI.\n\nGitLab Duo Pro is built with privacy as a critical foundation. Private, non-public customer code stored in GitLab is not used for training or fine-tuning AI models. 
Learn about [data usage when using GitLab Duo Pro Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html#code-suggestions-data-usage).\n\n## Get GitLab Duo Pro today\n\nGitLab Duo Pro is currently available at a special introductory price of $9 USD per user/month to Ultimate and Premium customers. Beginning on February 1, 2024, GitLab Duo Pro will be available for $19 USD per user/month. [Contact us today](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/) to get started with GitLab Duo Pro.\n",[9,496,716],{"slug":1878,"featured":91,"template":693},"gitlab-duo-pro","content:en-us:blog:gitlab-duo-pro.yml","Gitlab Duo Pro","en-us/blog/gitlab-duo-pro.yml","en-us/blog/gitlab-duo-pro",{"_path":1884,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1885,"content":1891,"config":1897,"_id":1899,"_type":14,"title":1900,"_source":16,"_file":1901,"_stem":1902,"_extension":19},"/en-us/blog/gitlab-duo-self-hosted-enterprise-ai-built-for-data-privacy",{"title":1886,"description":1887,"ogTitle":1886,"ogDescription":1887,"noIndex":6,"ogImage":1888,"ogUrl":1889,"ogSiteName":706,"ogType":707,"canonicalUrls":1889,"schema":1890},"GitLab Duo Self-Hosted: Enterprise AI built for data privacy","Customers in regulated industries can now deploy GitLab Duo on self-managed infrastructure, leveraging the power of generative AI while helping to address data residency and privacy concerns.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097840/Blog/Hero%20Images/Blog/Hero%20Images/Self-Hosted%201800x945_1dL1II2ITh2PteObA9DBLD_1750097839679.png","https://about.gitlab.com/blog/gitlab-duo-self-hosted-enterprise-ai-built-for-data-privacy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Self-Hosted: Enterprise AI built for data privacy\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Susie 
Bitters\"},{\"@type\":\"Person\",\"name\":\"Aathira Nair\"}],\n        \"datePublished\": \"2025-02-27\",\n      }",{"title":1886,"description":1887,"authors":1892,"heroImage":1888,"date":1894,"body":1895,"category":10,"tags":1896},[1300,1893],"Aathira Nair","2025-02-27","We are excited to announce the general availability of GitLab Duo Self-Hosted for Code Suggestions and Chat. An optional capability for self-managed customers with a GitLab Duo Enterprise subscription, GitLab Duo Self-Hosted supports deployment flexibility across multiple platforms, including on-premises infrastructure or in private clouds and secure cloud environments through AWS Bedrock and Azure OpenAI. GitLab Duo Self-Hosted empowers teams to innovate with AI while helping them maintain control over sensitive data and intellectual property. \n\nSecurity concerns have been a major barrier to AI adoption in regulated industries. In our [Global DevSecOps Survey](http://about.gitlab.com/developer-survey/2024/ai), more than half of the respondents said that introducing AI into the software development lifecycle is risky. With [GitLab Duo](https://about.gitlab.com/gitlab-duo/), we gave organizations a way to ship more secure software faster with AI throughout the entire software development lifecycle.\n\nGitLab Duo Self-Hosted expands the availability of GitLab Duo AI features to organizations with stringent data privacy requirements, offering flexibility in both AI large language model (LLM) selection and deployment options. The earliest adopters of GitLab Duo Self-Hosted include organizations in the public sector and regulated industries  – e.g., financial services, automotive, and healthcare. These organizations seek to gain the competitive advantage of AI by integrating AI-powered development tools into their environments, while also giving security teams the control they need.\n\nAs one U.S. 
government agency says: “After selecting GitLab as the cornerstone of our agency-wide DevSecOps platform, we chose GitLab Duo Self-Hosted to further advance our software factory capabilities. GitLab Duo’s ability to operate in air-gapped environments and provide granular control over our data was crucial to delivering secure AI-powered features. This unified approach streamlines our workflow and strengthens security, allowing us to leverage AI for increased productivity while meeting strict compliance requirements.” \n\n![GitLab Duo Self-Hosted models](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097848/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097848329.png)\n\n## Architect secure AI deployments\n\nGitLab Duo Self-Hosted enables GitLab Duo features that leverage a curated selection of leading AI LLMs, including those from Anthropic, Mistral, and OpenAI. Here are the LLMs supported by GitLab today:\n\n* On-premises - Mistral models with the vLLM serving platform  \n* AWS - Mistral and Anthropic Claude 3.5 Sonnet via AWS Bedrock  \n* Microsoft Azure - OpenAI GPT models via Azure AI\n\nWe are evaluating more models to support in the near future. [Learn more about the LLMs we support.](https://docs.gitlab.com/ee/administration/self_hosted_models/supported_models_and_hardware_requirements.html#approved-llms)\n\nGitLab Duo Self-Hosted deployment options include on-premises installations powered by the open-source vLLM framework, as well as private-cloud deployments via services like AWS Bedrock and Microsoft Azure AI. This flexibility helps organizations to architect AI solutions that align with their unique security, compliance, and performance requirements.\n\n## Simplify AI/ML implementation\n\nGitLab Duo's AI abstraction layer standardizes and simplifies the integration of the chosen LLM to a feature, mitigating the burden of implementing AI/ML technologies. 
This enables companies to streamline their AI adoption efforts and enhance the developer experience, free from the complexities of integrating and maintaining multiple tools.\n\n![GitLab Duo Self-Hosted AI-powered features](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097848/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097848330.png)\n\n## Maintain control of sensitive data\n\nBy isolating your GitLab instance, AI gateway, and LLMs in your own environment or country of choice, GitLab Duo Self-Hosted makes it possible that sensitive data and intellectual property remain within your designated perimeter. Granular control over data locality helps enable adherence to strict data residency regulations, while adopting AI capabilities in secure settings. Whether you use GitLab Duo Self-Hosted in a completely air-gapped environment with vLLM or leverage a supported private cloud, you can control all aspects of the deployment to include the geographic location of components. 
By eliminating the reliance on external APIs and providing full visibility into all request and response logs, GitLab Duo Self-Hosted helps even the most regulated organizations confidently adopt AI capabilities and meet the most stringent compliance obligations.\n\n**Start an interactive tour of GitLab Self-Hosted by clicking on the image below:**\n\n[![GitLab Duo Self-Hosted tour screenshot](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097848/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2025-02-20_at_7.00.34_AM_aHR0cHM6_1750097848332.png)](https://gitlab.navattic.com/gitlab-duo-self-hosted)\n\n## Get started with GitLab Duo Self-Hosted today\n\nIf you're ready to advance your AI journey while addressing security and data privacy, [reach out to us](https://about.gitlab.com/sales/) to help set up GitLab Duo Self-Hosted in your environment today.",[9,717,496,738,1181],{"slug":1898,"featured":91,"template":693},"gitlab-duo-self-hosted-enterprise-ai-built-for-data-privacy","content:en-us:blog:gitlab-duo-self-hosted-enterprise-ai-built-for-data-privacy.yml","Gitlab Duo Self Hosted Enterprise Ai Built For Data Privacy","en-us/blog/gitlab-duo-self-hosted-enterprise-ai-built-for-data-privacy.yml","en-us/blog/gitlab-duo-self-hosted-enterprise-ai-built-for-data-privacy",{"_path":1904,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1905,"content":1909,"config":1917,"_id":1919,"_type":14,"title":1920,"_source":16,"_file":1921,"_stem":1922,"_extension":19},"/en-us/blog/gitlab-duo-self-hosted-models-on-aws-bedrock",{"config":1906,"title":1907,"description":1908},{"noIndex":6},"Own your AI: Self-Hosted GitLab Duo models with AWS Bedrock","Discover how to leverage AI while maintaining control over your data, infrastructure, and security posture.",{"title":1907,"description":1908,"authors":1910,"heroImage":1913,"body":1914,"category":10,"tags":1915,"date":1916},[1911,1912],"Chloe Cartron","Olivier 
Dupré","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098682/Blog/Hero%20Images/Blog/Hero%20Images/duo-blog-post_1Cy89R1pY8OMwyrgSB525O_1750098682075.png","As organizations adopt AI capabilities to accelerate their software\ndevelopment lifecycle, they often face a critical challenge: how to leverage\nAI while maintaining control over their data, infrastructure, and security\nposture. This is where [GitLab Duo\nSelf-Hosted](https://about.gitlab.com/gitlab-duo/) provides a compelling\nsolution.\n\nIn this article, we'll walk through the implementation of GitLab Duo Self-Hosted models. This comprehensive guide helps organizations needing to meet strict data sovereignty requirements while still leveraging AI-powered development. The focus is on using models hosted on AWS Bedrock rather than setting up an [LLM](https://about.gitlab.com/blog/what-is-a-large-language-model-llm/) serving solution like vLLM. However, the methodology can be applied to models running in your own data center if you have the necessary capabilities.\n\n## Why GitLab Duo Self-Hosted?\n\nGitLab Duo Self-Hosted allows you to deploy GitLab's AI capabilities entirely within your own infrastructure, whether that's on-premises, in a private cloud, or within your secure environment.\n\n\nKey benefits include:\n\n* **Complete Data Privacy and Control:** Keep sensitive code and intellectual property within your security perimeter, ensuring no data leaves your environment.\n\n* **Model Flexibility:** Choose from a variety of models tailored to your specific performance needs and use cases, including Anthropic Claude, Meta Llama, Mistral families, and OpenAI GPT families.\n\n* **Compliance Adherence:** Meet regulatory requirements in highly regulated industries where data must remain within specific geographical boundaries.\n\n* **Customization:** Configure which GitLab Duo features use specific models to optimize performance and cost.\n\n* **Deployment Flexibility:** Deploy in fully 
air-gapped environments, on-premises, or in secure cloud environments.\n\n\n## Architecture overview\n\nThe GitLab Duo Self-Hosted solution consists of three core components:\n\n1. **Self-Managed GitLab instance**: Your existing GitLab instance where users interact with GitLab Duo features.\n\n2. **AI Gateway**: A service that routes requests between GitLab and your chosen LLM backend.\n\n3. **LLM backend**: The actual AI model service, which, in this article, will be AWS Bedrock.\n\n**Note:** You can use [another serving platform](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/supported_llm_serving_platforms/) if you are running on-premises or using another cloud provider.\n\n\n![Air-gapped network flow chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754422792/jws4h2kakflfrczftypj.png)\n\n\n## Prerequisites\n\nBefore we begin, you'll need:\n\n* A GitLab Premium or Ultimate instance (Version 17.10 or later)  \n\n  * We strongly recommend using the latest version of GitLab as we continuously deliver new features.\n\n* A GitLab Duo Enterprise add-on license  \n\n* AWS account with access to Bedrock models *or your API key and credentials needed to query your LLM Serving model*\n\n\n**Note:** If you aren't a GitLab customer yet, you can [sign up for a free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/), which includes GitLab Duo Enterprise.\n\n## Implementation steps\n\n**1. Install the AI Gateway**\n\n\nThe AI Gateway is the component that routes requests between your GitLab instance and your LLM serving infrastructure — here that is AWS Bedrock. It can run in a Docker image. Follow the instructions from our [installation documentation](https://docs.gitlab.com/install/install_ai_gateway/) to get started. \n\n\nFor this example, using AWS Bedrock, you also must pass the AWS Key ID and Secret Access Key along with the AWS region.  
\n\n\n```shell\n\nAIGW_TAG=self-hosted-v18.1.2-ee\n\ndocker run -d -p 5052:5052 \\\n\n  -e AIGW_GITLAB_URL=\u003Cyour_gitlab_instance> \\\n\n  -e AIGW_GITLAB_API_URL=https://\u003Cyour_gitlab_domain>/api/v4/ \\\n\n  -e AWS_ACCESS_KEY_ID=$AWS_KEY_ID \\\n\n  -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \\\n\n  -e AWS_REGION_NAME=$AWS_REGION_NAME \\\n\nregistry.gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/model-gateway:$AIGW_TAG\n\n```\n\nHere is the [`AIGW_TAG` list](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist/-/tags).\n\n\nIn this example we use Docker, but it is also possible to use the Helm chart. Refer to [the installation documentation](https://docs.gitlab.com/install/install_ai_gateway/#install-by-using-helm-chart) for more information.\n\n\n**2. Configure GitLab to access the AI Gateway**\n\n![Configure GitLab to access the AI Gateway](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754422792/xj9kvljkqsacpsw41k4a.png)\n\nNow that the AI gateway is running, you need to configure your GitLab instance to use it.\n\n  - On the left sidebar, at the bottom, select **Admin**.  \n\n  - Select **GitLab Duo**.  \n\n  - In the GitLab Duo section, select **Change configuration**.  \n\n  - Under Local AI Gateway URL, enter the URL for your AI gateway and port for the container (e.g., `https://ai-gateway.example.com:5052`).\n  \n  - Select **Save changes**.\n\n\n**3. Access models from AWS Bedrock** \n\n\nNext, you will need to request access to the available models on AWS Bedrock. \n\n\n  - Navigate to your AWS account and Bedrock.  \n\n  - Under **Model access**, select the models you want to use and follow the instructions to gain access. \n\n\nYou can find more information in the [AWS Bedrock documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html).\n\n\n**4. 
Configure the self-hosted model**\n\nNow, let's configure a specific AWS Bedrock model for use with GitLab Duo.\n\n![Add the self-hosted model screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754422792/chrlgdvxwdetcszptsav.png)\n\n  - On the left sidebar, at the bottom, select **Admin**.  \n\n  - Select **GitLab Duo Self-Hosted**.  \n\n  - Select **Add self-hosted model**.\n  \n  - Fill in the fields:  \n    * **Deployment name**: A name to identify this model configuration (e.g., \"Mixtral 8x7B\")  \n    * **Platform:** Choose AWS Bedrock  \n    * **Model family:** Select a model, for example here \"Mixtral\"  \n    * **Model identifier:** bedrock/`model-identifier` [from the supported list](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/supported_models_and_hardware_requirements/).\n    \n  - Select **Create self-hosted model**.\n\n\n**5. Configure GitLab Duo features to use your self-hosted model**\n\n\nAfter configuring the model, assign it to specific GitLab Duo features.\n\n![Screen to configure self-hosted model features](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754422793/an2i9s2p9cja2xx27g4z.png)\n\n  - On the left sidebar, at the bottom, select **Admin**.  \n\n  - Select **GitLab Duo Self-Hosted**.  \n\n  - Select the **AI-powered features** tab.  \n\n  - For each feature (e.g., Code Suggestions, GitLab Duo Chat) and sub-feature (e.g., Code Generation, Explain Code), select the model you just configured from the dropdown menu.\n\n\nFor example, you might assign Mixtral 8x7B to Code Generation tasks and Claude 3 Sonnet to the GitLab Duo Chat feature.\n\nCheck out the [requirements documentation](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/supported_models_and_hardware_requirements/) to select the right model for the use case from the models compatibility list per Duo feature. 
\n\n## Verifying your setup\n\nTo ensure that your GitLab Duo Self-Hosted implementation with AWS Bedrock is working correctly, perform these verification steps:\n\n**1. Run the health check**\n\nAfter running the health check of your model to be sure that it’s up and running, return to the GitLab Duo section from the Admin page and click on **Run health check**. This will verify that:   \n\n* The AI gateway URL is properly configured.  \n\n* Your instance can connect to the AI gateway.  \n\n* The Duo License is activated.   \n\n* A model is assigned to Code Suggestions — *as this is the model used to test the connection.*\n\n\n![Running the health check](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754422793/yffw21yhjpwummw1ffsw.png)\n\n\nIf the health check reports issues, refer to the [troubleshooting guide](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/troubleshooting/) for common errors. \n\n\n**2. Test GitLab Duo features**\n\nTry out a few GitLab Duo features to ensure they're working:\n\n* In the UI, open GitLab Duo Chat and ask it a question.  \n\n* Open the web IDE  \n  * Create a new code file and see if Code Suggestions appears.  \n  * Select a code snippet and use the `/explain` command to receive an explanation from Duo Chat. \n\n**3. Check AI Gateway logs**\n\nReview the AI gateway logs to see the requests coming to the gateway from the selected model:\n\nIn your terminal, run:\n\n```shell\ndocker logs \u003Cai-gateway-container-id>\n```\n\nOptional: In AWS, you can [activate CloudWatch and S3 as log destinations](https://docs.aws.amazon.com/bedrock/latest/userguide/model-invocation-logging.html). 
Doing so would enable you to see all your requests, prompts, and answers in CloudWatch.\n\n**Warning:** Keep in mind that activating these logs in AWS logs user data, which may not comply with privacy rules.\n\nAnd here you have full access to using GitLab Duo's AI features across the platform while retaining complete control over the data flow operating within the secure AWS cloud.\n\n## Next steps\n\n### Selecting the right model for each use case\n\nThe GitLab team actively tests each model's performance for each feature and provides [tier ranking of model's performance and suitability depending on the functionality:](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/supported_models_and_hardware_requirements/#supported-models)\n\n- Fully compatible: The model can likely handle the feature without any loss of quality.  \n\n- Largely compatible: The model supports the feature, but there might be compromises or limitations.  \n\n- Not compatible: The model is unsuitable for the feature, likely resulting in significant quality loss or performance issues.\n\nAs of this writing, most GitLab Duo features can be configured with Self Hosted. The complete availability overview is available in the [documentation](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/#supported-gitlab-duo-features). \n\n### Going beyond AWS Bedrock\n\nWhile this guide focuses on AWS Bedrock integration, GitLab Duo Self-Hosted supports multiple deployment options:\n\n1. [On-premises with vLLM](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/supported_llm_serving_platforms/#vllm): Run models locally with vLLM for fully air-gapped environments.  \n\n2. 
[Azure OpenAI Service](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/supported_llm_serving_platforms/#for-cloud-hosted-model-deployments): Similar to AWS Bedrock, you can use Azure OpenAI for models like GPT-4.\n\n## Summary\n\nGitLab Duo Self-Hosted provides a powerful solution for organizations that need AI-powered development tools while maintaining strict control over their data and infrastructure. By following this implementation guide, you can deploy a robust solution that meets security and compliance requirements without compromising on the advanced capabilities that AI brings to your software development lifecycle.\n\nFor organizations with stringent security and compliance needs, GitLab Duo Self-Hosted strikes the perfect balance between innovation and control, allowing you to harness the power of AI while keeping your code and intellectual property secure within your boundaries.\n\nWould you like to learn more about implementing GitLab Duo Self-Hosted in your environment? 
Please [reach out to a GitLab representative](https://about.gitlab.com/sales/) or [visit our documentation](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/) for more detailed information.\n",[9,835],"2025-08-07",{"featured":91,"template":693,"slug":1918},"gitlab-duo-self-hosted-models-on-aws-bedrock","content:en-us:blog:gitlab-duo-self-hosted-models-on-aws-bedrock.yml","Gitlab Duo Self Hosted Models On Aws Bedrock","en-us/blog/gitlab-duo-self-hosted-models-on-aws-bedrock.yml","en-us/blog/gitlab-duo-self-hosted-models-on-aws-bedrock",{"_path":1924,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1925,"content":1930,"config":1936,"_id":1938,"_type":14,"title":1939,"_source":16,"_file":1940,"_stem":1941,"_extension":19},"/en-us/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws",{"title":1926,"description":1927,"ogTitle":1926,"ogDescription":1927,"noIndex":6,"ogImage":1354,"ogUrl":1928,"ogSiteName":706,"ogType":707,"canonicalUrls":1928,"schema":1929},"GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available","The comprehensive AI-powered DevSecOps platform combined with the deepest set of cloud computing capabilities speeds dev cycles, increases automation, and improves code quality.","https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emilio Salvador\"}],\n        \"datePublished\": \"2025-04-17\",\n      }",{"title":1926,"description":1927,"authors":1931,"heroImage":1354,"date":1933,"body":1934,"category":10,"tags":1935},[1932],"Emilio Salvador","2025-04-17","Today, we're excited to announce the general availability of [GitLab Duo with Amazon Q](https://about.gitlab.com/partners/technology-partners/aws/), delivering agentic AI 
throughout the software development lifecycle for AWS customers. GitLab Duo with Amazon Q, based on GitLab Ultimate, includes many familiar features such as code completion, code explanation, code generation, chat, and vulnerability explanation and resolution – all of which are now powered by Amazon Q. It is available with a Self-Managed deployment model for customers on Amazon Web Services (AWS).\n\nWith Amazon Q's agents directly embedded into GitLab's DevSecOps platform, developers maintain their familiar development environment while gaining powerful AI capabilities. The result is a frictionless experience that helps accelerate development cycles, reduce manual effort, and enhance code quality.\n\n“Participating in the early access program for GitLab Duo with Amazon Q has given us a glimpse into its transformative potential for our development workflows,” said Osmar Alonso, DevOps Engineer, Volkswagen Digital Solutions. “Even in its early stages, we saw how the deeper integration with autonomous agents could streamline our process, from code commit to production. We're excited to see how this technology empowers our team to focus on innovation and accelerate our digital transformation.\"\n\n## Agentic AI comes to complex customer environments\n\nBy combining agentic AI with secure, reliable cloud infrastructure, GitLab and AWS bring built-in security, scale, and reliability to complex customer environments, enabling them to realize the following benefits:\n\n__Unified developer experience for streamlined development__\n\nDevelopers can interact with Amazon Q through the GitLab Duo Chat interface from their preferred IDE or the GitLab web interface. 
This eliminates the need for context switching in other tools and helps developers stay focused on the project that they’re working on.\n\n__One solution for the entire software development lifecycle__\n\nCode suggestions and optimizations leverage AWS-specific patterns and practices, while testing tools understand AWS service interactions and dependencies. A common data store across all stages provides essential context to AI agents, enabling complete visibility and traceability for relevant actions.\n\n__Secure development with enterprise-grade guardrails__\n\nEnd-to-end security and compliance are built directly into the development platform with guardrails that help reduce risk without impeding velocity. This secure software development approach enforces transparency and auditability through AI agents while seamlessly integrating with AWS security services and compliance frameworks.\n\n## How to start using GitLab Duo with Amazon Q\n\nHere are five initial use cases we’re targeting to help teams build secure software faster with agentic AI: \n\n1. **Feature development acceleration** - Create issue descriptions, generate implementation plans based on your existing codebase, and produce complete merge requests ready for review. This drives feature delivery acceleration while maintaining consistency with internal development standards.  \n2. **Legacy application modernization** - Analyze your legacy Java codebase, create a comprehensive upgrade plan, and generate a merge request with all necessary code changes. This unlocks faster Java upgrade time, while providing a clear audit trail of all code transformations. Support for .NET and other languages is planned for future releases.  \n3. **Quality assurance enhancement** - Analyze code and automatically create comprehensive unit tests that understand your application logic and AWS service interactions. 
This increases test coverage, reduces manual test writing effort, and helps ensure consistent test quality across applications.  \n4. **Code review optimization** - Provide inline feedback on code changes, suggesting improvements based on development standards, highlighting security and performance considerations. This enables reduced code review cycles and delivery of higher-quality code merges for deployment.  \n5. **Vulnerability remediation** - Explain detected vulnerabilities in clear, detailed terms and one-click remediation based on recommended code changes, helping to significantly reduce the time from detection to remediation.\n\nWatch GitLab Duo with Amazon Q in action:\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1075753390?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Technical Demo: GitLab Duo with Amazon Q\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n> #### Get the benefits of GitLab Duo with Amazon Q today\n> GitLab's unified, AI-powered DevSecOps platform with Amazon Q's advanced AI capabilities provides AWS customers with a solution that transforms how teams build and deploy software. 
To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).",[9,496,835,738,717,1181],{"slug":1937,"featured":91,"template":693},"gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws","content:en-us:blog:gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws.yml","Gitlab Duo With Amazon Q Agentic Ai Optimized For Aws","en-us/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws.yml","en-us/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws",{"_path":1943,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1944,"content":1949,"config":1954,"_id":1956,"_type":14,"title":1957,"_source":16,"_file":1958,"_stem":1959,"_extension":19},"/en-us/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai",{"title":1945,"description":1946,"ogTitle":1945,"ogDescription":1946,"noIndex":6,"ogImage":1354,"ogUrl":1947,"ogSiteName":706,"ogType":707,"canonicalUrls":1947,"schema":1948},"GitLab Duo with Amazon Q: DevSecOps meets agentic AI","AI-powered DevSecOps enhanced with autonomous AI agents accelerates developer productivity, application modernization, and innovation.","https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo with Amazon Q: DevSecOps meets agentic AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emilio Salvador\"}],\n        \"datePublished\": \"2024-12-03\",\n      }",{"title":1945,"description":1946,"authors":1950,"heroImage":1354,"date":1951,"body":1952,"category":10,"tags":1953},[1932],"2024-12-03","We're excited to announce GitLab Duo with Amazon Q, a joint offering that brings together GitLab's comprehensive AI-powered DevSecOps platform with Amazon Q's autonomous AI 
agents in a single, integrated solution.\n\nGitLab Duo with Amazon Q transforms software development by integrating powerful AI agents directly into your daily workflows. Instead of switching between multiple tools, developers can now accelerate key tasks — from feature development to code reviews — all from within GitLab's comprehensive DevSecOps platform. Amazon Q’s AI agents act as intelligent assistants, automating time-consuming tasks like generating code from requirements, creating unit tests, conducting code reviews, and modernizing Java applications. By handling these complex tasks, this joint offering helps teams focus on innovation, while maintaining security and quality standards.\n\nThis enterprise-class developer experience includes:\n* The GitLab unified platform with one single data store, which automates the building, testing, packaging, and deployment of secure code\n* GitLab Duo, enhanced with Amazon Q developer, which leverages GitLab project context to generate multi-file changes based on the task\n* Amazon Q AI agents integrated with GitLab Duo, updating issues and creating merge requests per task, with permission scoped to the project\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1033653810?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab Duo and Amazon Q\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Partnership innovation: GitLab and AWS\n\nGitLab Duo with Amazon Q is the result of close collaboration between GitLab and AWS engineering teams, combining our strengths to transform software development. 
This partnership unites GitLab's expertise in unified DevSecOps with AWS's leadership in cloud computing, creating an innovative solution that understands how developers work.\n\nBy integrating Amazon Q's autonomous agents with GitLab's comprehensive AI-powered platform, we've built more than a technical integration. We've created an experience that makes AI-powered development feel natural and upholds the security, compliance, and reliability that enterprises require.\n\nIndustry analysts recognize the significance of this integration in advancing AI-powered software development:\n\n***\"With this joint offering, GitLab and AWS are combining their strengths to make agentic AI a reality in software development,\" said Katie Norton, Research Manager at IDC. \"GitLab Duo with Amazon Q addresses strong use cases and critical challenges that empower customers to harness the full potential of AI.\"***\n\n***\"Both developers and the organizations they work for are increasingly interested in simplified and unified experiences,\" says Rachel Stephens, senior analyst at RedMonk. \"Especially in the era of AI – when security and privacy are paramount concerns – organizations want to both harness the power of cutting edge technology while also controlling risk and minimizing disjointed software tool chains. The partnership between GitLab Duo and Amazon Q seeks to give developers the tools they need within the context of an end-to-end DevSecOps experience.\"***\n\n## 4 key customer benefits \n\nGitLab Duo with Amazon Q pairs AI-powered DevSecOps with the deepest set of cloud computing capabilities. Together, they help development teams:\n\n### 1. Streamline feature development from idea to code \n\nDevelopment teams often spend hours translating requirements into code, leading to slower delivery and inconsistent implementation. 
You can now invoke the GitLab Duo with Amazon Q agent by utilizing a new quick action `/q dev`, which will convert an issue description directly into merge-ready code in minutes. The agent analyzes requirements, plans the implementation, and generates a complete merge request — all while adhering to your team's development standards. Teams can iterate rapidly using feedback in comments, significantly reducing the time from idea to production.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050110?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Feature Dev with Rev\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 2. Modernize legacy code without the headache \n\nUpgrading Java applications traditionally requires weeks of careful planning, manual code changes, and extensive testing. By using quick action `/q transform`, you can change this by automating the entire Java modernization process. In minutes, not hours, the agent analyzes your Java 8 or 11 codebase, creates a comprehensive upgrade plan, and generates fully documented merge requests for Java 17 migration. Every change is tracked and traceable, giving teams confidence while improving application security and performance.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050145?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"QCT\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 3. 
Accelerate code reviews without sacrificing quality \n\nCode reviews often create bottlenecks: Teams wait days for feedback yet must maintain consistent standards. With the `/q review` quick action, you get instant, intelligent feedback on code quality and security directly in merge requests. By automatically identifying potential issues and suggesting improvements based on your standards, teams can maintain high-quality code while dramatically reducing review cycles.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050136?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Code Reviews\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 4. Automate testing to ship with confidence\n\nManual test creation is time-consuming and often leads to inconsistent coverage across teams. With the `/q test` quick action, you can automatically generate comprehensive unit tests that understand your application logic. The agent ensures thorough coverage of critical paths and edge cases, matching your existing testing patterns. 
This automation helps teams catch issues earlier and maintain consistent quality standards, saving valuable developer time.\n\n\u003Cdiv style=\"padding:54.37% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050181?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Use GitLab Duo with Amazon Q to add tests\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Enterprise-grade security and guardrails included\n\nBuilt for enterprise scale and security, this offering combines GitLab's integrated security, compliance, and privacy with Amazon Q's AI agent, accelerating developer workflows to help organizations ship secure software faster.\n\nThe integration features:\n\n* Built-in guardrails that maintain development velocity  \n* Granular controls for AI-powered features at user, project, and group levels  \n* End-to-end security integration with existing workflows\n\nDevSecOps teams can securely scale the development environment with the world's most broadly adopted cloud.\n\n## What's next\n\nGitLab Duo with Amazon Q builds on our existing integration with [AWS announced in May 2024](https://press.aboutamazon.com/2024/4/aws-announces-general-availability-of-amazon-q-the-most-capable-generative-ai-powered-assistant-for-accelerating-software-development-and-leveraging-companies-internal-data), representing a significant step forward in our joint mission to transform software development. This deeper integration of AI capabilities marks the beginning of our expanded collaboration with AWS. 
As we continue to evolve these capabilities, we'll focus on:\n\n* Extending AI features across the development lifecycle  \n* Enhancing developer productivity  \n* Meeting enterprise development demands at scale\n\n**GitLab Duo with Amazon Q is available today on a [public branch](https://gitlab.com/groups/gitlab-org/-/epics/16059) in the GitLab.org project. To get access to a preview and learn more about how it can transform your software development process, visit [our website](https://about.gitlab.com/partners/technology-partners/aws/#interest).**",[1181,835,9,496,233],{"slug":1955,"featured":91,"template":693},"gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai","content:en-us:blog:gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai.yml","Gitlab Duo With Amazon Q Devsecops Meets Agentic Ai","en-us/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai.yml","en-us/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai",{"_path":1961,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1962,"content":1968,"config":1974,"_id":1976,"_type":14,"title":1977,"_source":16,"_file":1978,"_stem":1979,"_extension":19},"/en-us/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai",{"title":1963,"description":1964,"ogTitle":1963,"ogDescription":1964,"noIndex":6,"ogImage":1965,"ogUrl":1966,"ogSiteName":706,"ogType":707,"canonicalUrls":1966,"schema":1967},"GitLab Duo Workflow: Enterprise visibility and control for agentic AI","Secure, autonomous, context-aware AI agents take on complex tasks, freeing developers to ship innovative software faster. 
Private beta waitlist now open.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660174/Blog/Hero%20Images/Workflow_1800x945.png","https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Workflow: Enterprise visibility and control for agentic AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pini Wietchner\"}],\n        \"datePublished\": \"2025-02-24\",\n      }",{"title":1963,"description":1964,"authors":1969,"heroImage":1965,"date":1971,"body":1972,"category":10,"tags":1973},[1970],"Pini Wietchner","2025-02-24","Today, we're excited to announce the opening of the waitlist for the [private beta of GitLab Duo Workflow](https://about.gitlab.com/gitlab-duo/agent-platform/): **agentic AI built on top of the most comprehensive DevSecOps platform.** The next step in our AI roadmap, GitLab Duo Workflow will help development teams navigate everything from project bootstrapping to deployment processes, from debugging issues to cross-team coordination, all within the IDE.\n\nGitLab Duo Workflow leverages the GitLab platform's structure for collaboration, continuous integration, continuous deployment, security, and compliance to help organizations as they accelerate their development process with AI agents.\n\nUse GitLab Duo Workflow to help you:\n* [bootstrap a new development project](#from-slow-project-setup-to-a-running-start)\n* [modernize code](#from-legacy-code-to-modern-applications)\n* [perform contextual tasks](#from-context-switching-to-flow-state)\n* [create documentation](#from-stale-docs-to-dynamic-knowledge)\n* [enhance test coverage](#from-patchy-to-comprehensive-testing)\n* and more\n\nThis is just the beginning. 
With GitLab’s unified data store, the more you use GitLab, the more context GitLab Duo Workflow has about your code, configurations, security findings, and deployment practices. The result: an increasingly powerful development experience that's tailored to your organization.\n\n## The promise and challenge of AI agents\n\nSoftware has fundamentally changed the world, but only a tiny fraction of the world's population has the skills to build software today. Yet, these developers reach billions of people with smartphones and internet connections. Just imagine a world where *more* people can build, secure, and deliver production-ready software – there will be an explosion of innovation as more people can create software that impacts billions. **Agentic AI will make that happen.**\n\nAI agents understand context, maintain knowledge of entire codebases, and actively collaborate on complex software projects across development, security, and operations. With AI agents, developers can create software at a scale previously unimaginable for individuals or even teams.\n\nBut this shift raises important questions about visibility, control, and how AI will impact developers' work. Organizations need to ensure AI enhances their developers' capabilities while enabling them to maintain oversight of their development process. The key to success isn't just adopting AI – it's adopting it in a way that empowers developers while preserving security, compliance, and governance.\n\n## AI's success depends on your platform, not more add-on tools\n\nWhen you're working with more developers, code, and potential security risks, adding separate tools for each new challenge only creates more complexity. 
Our most recent [DevSecOps Survey](https://about.gitlab.com/the-source/platform/devops-teams-want-to-shake-off-diy-toolchains-a-platform-is-the-answer/) shows just how serious this problem is: DevSecOps teams are juggling up to 14 different tools, with professionals spending up to 80% of their time on non-coding tasks. For AI to be truly effective, it also needs high-quality, unified data. That's hard to achieve with disparate tools.\n\n**The GitLab DevSecOps platform combined with GitLab AI agents** brings everything together in a single data model that encapsulates source code, merge requests, epics, users, access rights, and more. The agents we're building use context about users and projects to standardize how teams work and automate the non-coding tasks that absorb developer time, such as scanning for security issues and enforcing compliance rules. When AI is built directly into the platform, these capabilities become even more powerful, turning AI agents into development partners while keeping you in control of how AI enhances the process.\n\n**This isn't a far-off future — it's what we're building right now with GitLab Duo Workflow.**\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1059060959?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab Duo Workflow, the future of secure agentic AI software development\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## GitLab Duo Workflow: AI agents on the most comprehensive DevSecOps platform\n\nLeveraging GitLab's end-to-end DevSecOps platform, GitLab Duo Workflow helps developers work at their highest potential. 
While AI coding assistants help with individual pieces of code, GitLab Duo Workflow will understand your entire development lifecycle – automating routine tasks so developers can focus on strategic innovation and creative problem-solving. As we develop GitLab Duo Workflow, here’s what it will be able to help teams achieve:\n\n### From slow project setup to a running start\n\nDevelopers spend precious time configuring new projects, managing dependencies, and setting up basic infrastructure instead of building new features. With GitLab Duo Workflow, you can **automate project bootstrapping directly in the IDE**, providing the right configurations from the start so you can focus on innovation sooner.\n\n### From legacy code to modern applications\n\nModernizing legacy code is more than just updating syntax — it requires understanding dependencies, tests, CI/CD pipelines, and documentation. GitLab Duo Workflow helps **modernize your codebase by handling code refactoring** – from code to tests.\n\n### From context switching to flow state\n\nToday, developers constantly switch between tools, docs, and codebases to solve problems. GitLab Duo Workflow will help **resolve tasks with the full context of your codebase-related issues and merge requests**, letting developers stay in their flow.\n\n### From stale docs to dynamic knowledge\n\nDocumentation becomes stale quickly, making codebases harder to understand and maintain. GitLab Duo Workflow **supports developers in generating and updating documentation**, including README files, code flow diagrams, and architecture documentation.\n\n### From patchy to comprehensive testing\n\nAs codebases grow, maintaining comprehensive test coverage becomes increasingly challenging. 
GitLab Duo Workflow **can generate tests for entire sections of your codebase** while integrating with your existing test infrastructure, ensuring more reliable software with less effort.\n\n## Sign up for the private beta waitlist\n\n[Sign up for the GitLab Duo Workflow private beta waitlist](https://about.gitlab.com/gitlab-duo/agent-platform/) to see the next step in our vision for secure agentic AI – from project setup to deployment. Built on GitLab's DevSecOps platform, these agents understand your entire software lifecycle while maintaining the enterprise-grade security and control organizations require.\n\n*Disclaimer: This page contains information about upcoming products, features, and functionality. This information is for informational purposes only and should not be relied upon for purchasing or planning. All items are subject to change or delay, and the development, release, and timing remain at GitLab Inc.'s sole discretion.*",[496,9,717,738,1181,759],{"slug":1975,"featured":91,"template":693},"gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai","content:en-us:blog:gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai.yml","Gitlab Duo Workflow Enterprise Visibility And Control For Agentic Ai","en-us/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai.yml","en-us/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai",{"_path":1981,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":1982,"content":1987,"config":1993,"_id":1995,"_type":14,"title":1996,"_source":16,"_file":1997,"_stem":1998,"_extension":19},"/en-us/blog/gitlab-flow-duo",{"title":1983,"description":1984,"ogTitle":1983,"ogDescription":1984,"noIndex":6,"ogImage":868,"ogUrl":1985,"ogSiteName":706,"ogType":707,"canonicalUrls":1985,"schema":1986},"Combine GitLab Flow and GitLab Duo for a workflow powerhouse ","Add the AI-powered capabilities of GitLab Duo to GitLab Flow to boost the efficiency of DevSecOps workflows. 
This is a guide for deployment in your environment, including a video tutorial.","https://about.gitlab.com/blog/gitlab-flow-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Combine GitLab Flow and GitLab Duo for a workflow powerhouse \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2023-07-27\",\n      }",{"title":1983,"description":1984,"authors":1988,"heroImage":868,"date":1989,"body":1990,"category":10,"tags":1991,"updatedDate":1992},[733],"2023-07-27","Starting out with DevSecOps requires a well-thought-out workflow, but that can sometimes seem like a daunting challenge. Luckily, there are two things that can help: GitLab Flow and GitLab Duo. GitLab Flow is a prescribed approach to help organizations successfully apply DevSecOps processes. GitLab Duo is a [powerful set of AI-powered capabilities](https://about.gitlab.com/blog/supercharge-productivity-with-gitlab-duo/) within the GitLab DevSecOps Platform that can help organizations develop code, improve operations, and secure software more efficiently. Combined, GitLab Flow and GitLab Duo can help organizations achieve significant improvements in end-to-end workflow efficiency, which can lead to even higher levels of productivity, deployment frequency, code quality and overall security, and production resiliency and availability.\nIn this article, we delve into how GitLab Flow and GitLab Duo can be used together to help organizations be successful with DevSecOps.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n## What is GitLab Flow?\nGitLab Flow is a prescribed and opinionated end-to-end workflow for the development lifecycle of applications when using GitLab, an AI-powered DevSecOps platform with a single user interface and a single data model. 
GitLab Flow is based on best practices and lessons learned from customer feedback and our dogfooding. Furthermore, GitLab Flow spans across the [stages of the DevSecOps lifecycle](https://about.gitlab.com/stages-devops-lifecycle/), forming an efficient workflow with an inner feedback loop for reviewing a specific update and an outer feedback loop for improving the entire application, as well as the development lifecycle itself. \n\n![The GitLab Flow inner and outer loops](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-feedback-loops.png)\n\u003Ccenter>The GitLab Flow inner and outer loops\u003C/center>\u003Cp>\u003C/p>\n\nAs you can see by the many stages in GitLab Flow, there is much more to developing software than writing code. Below, we'll dive into each step of GitLab Flow and how GitLab Duo can help. \n\n### Planning\nThe first portion of GitLab Flow is planning, which sits on the outer feedback loop of GitLab Flow. It encompasses issues, merge requests, epics, milestones, iterations, release, release evidence, and more. Let’s cover what roles these components play in GitLab Flow and how GitLab Duo can help.\n\n![Planning - first portion of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-planning-portion.png)\n\u003Ccenter>Planning - first portion of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\n#### Issues\nIssues are where product problems or new features are defined and where team members can collaborate. As an issue is created, you can populate its title and then leverage GitLab Duo **Issue description generation** capability to help enrich the description field, saving time and effort. 
Because many stakeholders can participate in comment threads on an issue, **Discussion summary** is an AI-powered capability in GitLab Duo that can summarize hundreds of comments on an issue into a concise paragraph so that a stakeholder can quickly get caught up with the conversation, jump into the discussion, and become productive right away.\n\nIssues can be organized and visualized in issue boards, which are a software project management tool that can be used as kanban or Scrum boards. These boards help teams plan, organize, and visualize a workflow for a feature or product release. Different categories of boards can be created and issues can be moved from one board to another one with a simple drag and drop.\n\n#### Merge requests\nMerge requests are where solutions are developed. As release components, issues and merge requests provide the auditability and tracking of application changes done by stakeholders, such as DevOps and platform engineers, system and database administrators, security engineers, and developers. In addition, issues and merge requests are key inputs for the release planning process.\n\nMerge requests can be individually created or created from an issue. Creating a merge request from an issue automatically relates it to that issue so when the merge request is merged its associated issue is automatically closed. Merge requests can also be manually related to an issue.\n\n![Merged merge request will close issue](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/mr-with-its-issue.png)\n\u003Ccenter>Merged merge request will close issue\u003C/center>\u003Cp>\u003C/p>\n\nLike issues, merge requests can include a long list of updates to a feature branch by many stakeholders. Collaborators who need to familiarize themselves with or understand all of the updates included in a merge request can take advantage of the **Merge request summary** capability in GitLab Duo to quickly get caught up on the changes. 
In addition, collaborators can invoke GitLab Duo **Code Merge request template population**, which uses a pre-created merge request template and automatically fills in the content for sections in it. Description templates provide a way to standardize and optimize collaboration and communication across the development lifecycle and GitLab Duo speeds this up even more!\n\nIssues with the same theme can be grouped together in an epic to organize the work to be done. Epics can have child issues and sub-epics and/or be linked to epics across the organization. Iterations can be used to track sprints of work, and can be manually scheduled or scheduled automatically using GitLab iteration cadences to streamline planning workflows. In addition, iterations include burndown and burnup charts. Burndown charts help track overall progress towards a project's total scope, while burnup charts track the daily total count and weight of issues added to and completed in a given timebox.\n\n#### Milestones\nTeams can use milestones to organize issues and merge requests into a cohesive group with an optional start date and an optional due date. Milestones are typically used to track releases and can track issues and merge requests at a project level or group level. Similar to iterations, milestones also provide burndown and burnup charts to show progress.\n\nMilestones can be associated with a release, whose automated creation generates many artifacts, including the release evidence. The release evidence is an automatically collected snapshot of data that’s related to the release. In addition to test artifacts and linked milestones, job artifacts can optionally be included in the release evidence, which can facilitate internal processes such as external audits.\n\nEpics, milestones, and iterations can be visualized via the Roadmaps page, which helps track release progress and streamline the release process. 
\n\nOnce the planning takes place, the work towards the resolution of a problem or a new feature can start. This happens in merge requests. Let’s delve deeper into how that happens in GitLab Flow. \n\n> [Learn more by trying GitLab Flow and GitLab Duo](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com%2Fblog%2F).\n\n### Merge requests and pushing code\n\n![Merge requests and pushing code - second portion of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-mr-pushing-code-portion.png)\n\u003Ccenter>Merge requests and pushing code - second portion of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\nThe second portion of GitLab Flow is related to merge requests and pushing code. As mentioned earlier, merge requests are where solutions are developed through collaboration among stakeholders across the organization. This collaboration can happen in a distributed manner and asynchronously. Participants can take advantage of collaborative capabilities, such as tagging, inline suggestions, inline comments, merge request comments, review threads, and review requests, which can help improve code quality, availability, reliability, and performance. Right after the creation of the merge request is the start of the GitLab Flow inner feedback loop, which is where code and fix pushes, test and scan runs, and collaboration and update reviews take place.\n\n#### Pipelines\nAs updates are applied to a feature branch via merge requests, pipelines — if defined — are automatically executed. Pipelines can have multiple stages and jobs to build and test, and then deploy the application or microservice to a review environment. In that review environment, the updates can be dynamically verified before they are merged to the main branch. 
This automation helps streamline the application update and review processes.\n\nIn addition, as DevSecOps teams make updates to the application via merge requests, they have a variety of AI-powered capabilities at their disposal. As they write or update code, GitLab Duo **Code Suggestions** recommends code that should come next and the developer can choose to accept or ignore the recommendation. Code Suggestions support code generation via prompts as well as code completion as you type. Code Suggestions can help improve the programming experience by reducing errors and helping developers write code faster, which can help enhance production code quality. Code Suggestions also can lead to higher developer productivity and faster iterations and rollouts.\n\nAs different stakeholders within the organization participate in the development or review of applications, they may encounter code that is poorly documented, complex or difficult to understand, or is written in a programming language unfamiliar to them. The GitLab Duo **Code explanation** capability explains code in natural language so that everyone can understand the code and get up to speed quickly.\n\nMoreover, when updates are committed to the feature branch, the GitLab Duo **Suggested reviewers** capability uses the changes in a merge request and a project’s contribution graph to suggest appropriate reviewers in the reviewer dropdown in the merge request sidebar. The list includes users that are knowledgeable about a specific aspect of the application and would be the best candidates to review the updates. Developers save time by not having to search and identify adequate reviewers, streamlining the review process and avoiding delays and low-quality reviews.\n\nWhen developers make changes to the code, they often don't include a comment in the merge request about the specific changes they made. 
The GitLab Duo **Merge request summary** capability allows the author of merge request changes to use AI to generate a natural-language comment that summarizes the updates to the code. Reviewers then can better understand the changes and streamline the entire review process\n\nAs reviewers review updates to the code in a merge request, they can create a review block, which can consist of many comments spanning many source files. To help the original author of the updates better understand the feedback provided by the reviewer in a long review block, the GitLab Duo **Code review summary** capability generates a natural-language summary of the reviewer’s feedback. This enables better handoff between authors and reviewers, streamlining the review process.\n\nFurthermore, when developers add new code via a merge request, they can leverage the GitLab Duo **Test generation** capability to use AI to generate unit tests for the new code. This can help to increase developer productivity, improve test coverage, and catch bugs early in the development lifecycle. Developers can also leverage GitLab Duo **Chat**, which is always accessible, to refactor code and generate in-line documentation, e.g. 
docstrings, for their source code.\n\nWhile pipelines execute on branch updates, they can include automated tests and scans, which helps in shifting security left.\n\n### Shifting security left\n\n![Shifting security left - third portion of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-shift-sec-left-portion.png)\n\u003Ccenter>Shifting security left - third portion of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\nThe third portion of GitLab Flow is shifting security left, which is also part of the GitLab Flow inner feedback loop.\n\nIn addition to DevOps and platform engineers, system and database administrators, and developers, some of the stakeholders collaborating in a merge request may be concerned about security and compliance, which is where automated tests and security scans play a role. Scans can be simply included in a pipeline via readily available templates and/or can be automatically executed within a merge request pipeline. GitLab provides a broad set of built-in security scanners and analyzers that can be leveraged by GitLab Flow, but the DevSecOps platform can also accommodate third-party and custom scanners.\n\nGitLab Flow shifts security left in the pipeline to detect and resolve defects as early as possible in the software development process. It is much simpler and cheaper to fix vulnerabilities early in the development cycle than once the application is in production, where an unscheduled outage can affect your users and revenue.\n\nThe built-in security scanners and analyzers provided by GitLab include: unit testing, infrastructure-as-code (IaC) scanning, static application security testing (SAST) scanners, dependency scanning, secret detection, container scanning, API security, web API fuzz testing, and coverage-guided fuzz testing. 
In addition, GitLab provides a variety of security dashboards and reports to manage and visualize vulnerabilities, such as the Dependencies list, Security dashboard, Vulnerability Report, and vulnerability pages.\n\nTo help developers and security engineers better understand and remediate vulnerabilities more efficiently, the GitLab Duo **Vulnerability explanation** capability provides an explanation about a specific vulnerability, how it can be exploited, and, most importantly, a recommendation on how to fix the vulnerability. Developers can also take advantage of GitLab Duo **Vulnerability resolution**, which automatically creates a merge request that includes code changes to fix the vulnerability. These AI-powered capabilities can help streamline and optimize the process of securing and hardening an application to prevent vulnerabilities that can be exploited by cyber attacks in production.\n\nBesides SAST scanners, GitLab provides dynamic application security testing (DAST) scanners, which require a running application. When leveraging these scanners, GitLab is capable of automatically provisioning a DAST environment for the DAST scans and then performing a complete cleanup of all resources post-DAST testing. In addition, for running containers, GitLab provides operational container scanning, which scans container images in your cluster for security vulnerabilities.\n\nThe scans mentioned above can be executed automatically within a merge request pipeline or, in some cases, can be scheduled for execution via scan execution and merge request approval policies. These policies can be defined via the GitLab UI or YAML files and are configured in a separate project, allowing segregation of duties for reusability, maintenance, and management. Scan execution policies require that security scans be run on a specified schedule or with the project pipeline, and merge request approval policies take action based on scan results. 
Security engineers or teams can define these policies to enforce security processes across the organization and GitLab Flow may encounter or leverage these as it spans through its steps.\n\nTo enforce security and compliance across projects in your organization, you can use compliance labels and pipelines. Compliance labels and pipelines can be made mandatory to execute before a project’s own pipeline. With this approach, you can ensure that all teams within your organization meet your security and compliance standards. In addition, you can secure your applications against cyber attacks, conform to government compliance standards, and always be audit-ready.\n\nThe main goal of all of these GitLab Flow security prescriptions is to fix vulnerabilities early in the development cycle rather than once the application is in production, where remediating a vulnerability can prove to be very costly in reputation and revenue.\n\nAs vulnerabilities are mitigated within the GitLab Flow inner feedback loop and more updates are applied to the application in the feature branch, stakeholders need to re-review these updates to ensure that the updates have taken place and no regressions have inadvertently been introduced.\n\n### Continuous review\n\n![Reviews - fourth portion of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-reviewing-features-portion.png)\n\u003Ccenter>Reviews - fourth portion of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\nThe next portion of GitLab Flow is reviewing features, which prescribes the continuous review of applications. Reviewing features involves the ability to stand up a review environment to which the interim application (feature branch) is deployed so that stakeholders can review it in real time and provide feedback. The interim application can then be continuously adjusted until it is ready to be merged to the main branch. 
GitLab Flow also prescribes the cleanup of all provisioned review environment resources at the moment when the merge request is merged to the main branch.\n\nThis iterative automated review process is part of the inner feedback loop in GitLab Flow. As mentioned above, within the inner feedback loop, GitLab Duo capabilities like Code explanation, Code Suggestions, Suggested reviewers, Merge request summary, Merge request template population, Code review summary, Vulnerability explanation, Vulnerability resolution, and Root cause analysis are prescribed by GitLab Flow to enable a better handoff between authors and reviewers and streamline the entire review process.\n\nThe GitLab Flow inner feedback loop terminates when all review items are addressed and the merge request is approved and merged to the main branch, which triggers the deployment of the application to production.\n\n### Deploying applications and infrastructure\n\n![Deploying - fifth portion of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-deploy-apps-portion.png)\n\u003Ccenter>Deploying - fifth portion of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\nDepending on an organization’s needs, either continuous delivery or continuous deployment is prescribed by GitLab Flow. Whereas continuous delivery is the frequent release of code by triggering the deployments manually (e.g., to production), continuous deployment is the automated release of code (e.g., to production) without human intervention. Let’s cover continuous delivery first.\n\nAs you release your software using continuous delivery, you have a few deployment options. You can establish a freeze window and then deploy using advanced deployment techniques, such as canary, blue/green, timed, and incremental rollouts. Incremental rollouts can lower the risk of production outages delivering a better user experience and customer satisfaction. 
Advanced deployment techniques can also improve development and delivery efficiency, streamlining the release process.\n\nAs you release your software using continuous deployment, all changes/updates go directly to production. Progressive delivery approaches like feature flags, which allow you to separate the delivery of specific features from a launch, are a good way to reduce risk and manage what functionality to make available to production users. Feature flags support multiple programming languages and allow developer experimentation and controlled testing. You can even use feature flags to roll out features to specific users.\n\nAlthough GitLab supports all these deployment approaches, GitLab Flow allows for the adoption of the approach that best fits the organization and/or specific project needs.\n\n### Monitoring applications and DevSecOps processes\nOnce your application has been deployed to production, it needs to be continuously monitored to ensure its stability, performance, and availability. In addition, as the DevSecOps processes execute, they are measured, providing the opportunity to improve their performance and efficiency. The monitoring capabilities are provided by GitLab and, as such, can be leveraged by GitLab Flow.\n\nFor running containers, GitLab provides operational container scanning (OCS), which scans container images in your cluster for security vulnerabilities. These scans can be automated by scheduling them when to run and any found vulnerabilities are automatically displayed in a security dashboard. The OCS can help keep your cluster applications secure and preempt any cyber attacks that can lead to leaks of private data and even cause unexpected outages.\n\nError tracking allows developers to discover and view errors generated by their application. All errors generated by your application are displayed in the Error Tracking list in GitLab. 
Error tracking can help with availability and performance of your applications by detecting and resolving unexpected application conditions fast.\n\nGitLab can accept alerts from any monitoring source, including Prometheus, via a webhook receiver. As alerts come in, they are displayed in the GitLab Alerts list, from which you can manually manage them. Alerts can also automatically trigger the creation of incidents, ChatOps, and email messages to appropriate individuals or groups. All these capabilities streamline the alert resolution and management process.\n\nAs incidents are created, due to production problems, they appear in the GitLab Incidents list for incident management. You can manage one or more incidents, sort them, search them, assign them, set their statuses, and even see their SLA preset countdown timer. Moreover, you can create on-call schedules and rotations, escalation policies, and set up paging and notifications to handle incidents. In addition, you can link an incident to an alert so that when the incident is closed, its associated alert is automatically resolved. Incident timelines are another capability for executives and external viewers to see what happened during an incident, and which steps were taken for it to be resolved. All these capabilities streamline the incident management process so that they can be resolved as quickly as possible.\n\nAudit events track important events, including who performed the related action and when in GitLab. These events are displayed in the GitLab Audit Events list and provide, among others, the action that was taken on an object, who did it, and the date and time of its occurrence.\n\nAll the lists and dashboards mentioned above can help preempt out-of-compliance scenarios to avoid penalties as well as streamline audit processes. 
For your running applications, they generate the data and metrics that can be used in the GitLab Flow outer feedback loop to help improve and optimize your applications and lower the risk of unscheduled production outages.\n\n### Continuous improvement\nWhen applying GitLab Flow, you also have the opportunity to use the insight that GitLab provides in the form of end-to-end process metrics dashboards to continuously improve not just your application but also your software delivery performance. These dashboards and their metrics are auto-generated by GitLab and are always available.\n\n### The Value Stream Analytics dashboard\n\nYou can track and monitor your application development lifecycle through the Value Stream Analytics Dashboard, where you can check project or group statistics over time. This dashboard is customizable but you can get started quickly by creating a value stream using a GitLab-provided default template. The default dashboard displays metrics for each of the pre-defined stages of your value stream analytics, namely Issue, Plan, Code, Test, Review, and Staging, as well as a graph with the average time to completion for each. It also shows the value stream analytics key metrics: lead time, cycle time, new issues, commits, and deploys. You can use these metrics to find areas of improvement in the stages of your value stream.\n\n### DORA metrics dashboard\n\nTo view the performance metrics that measure the effectiveness of your organization’s development and delivery practices, GitLab provides the [DORA](https://about.gitlab.com/solutions/value-stream-management/dora/) (DevOps Research and Assessment) metrics dashboard, which displays four key metrics: Deployment Frequency, Lead Time for Changes, Time to Restore Service, and Change Failure Rate. Deployment Frequency measures how often your organization deploys code to production or releases it to end users. 
Lead Time for Changes measures how long it takes to go from code committed to code successfully running in production. Time to Restore Service measures the time needed to restore services to the level they were previously, in case of an incident. Finally, Change Failure Rate is the percentage of changes to production or released to users that resulted in a degraded service (for example, a change that caused a service impairment or outage) and subsequently required remediation (required a hotfix, rollback, patch). These four key metrics are outcomes of your current processes and give you the opportunity to improve the factors and capabilities that drive them.\n\n### Customize your dashboard\n\nAnother dashboard is the Value Streams Dashboard, which is a customizable dashboard that enables decision-makers to identify trends, patterns, and opportunities for software development improvements. The metrics shown are the DORA metrics followed by the value stream analytics flow metrics and counts for critical and high vulnerabilities for the month to date, the two preceding months, and the past six months.\n\nGitLab Duo can also help in your continuous improvement efforts. For example, the **Value stream forecasting** capability takes historical data and uses data trends across your development lifecycle to predict the future behavior of your value stream metrics. 
You can use these predictive analyses in your optimization initiatives.\n\nAll these dashboards and the metrics they report on are part of the GitLab Flow outer feedback loop to help you lower the risk of unscheduled production outages and improve and optimize your applications and DevSecOps workflows.\n\n### AI impact analytics\nTo better understand the impact of the use of GitLab Duo (or AI) along the entire development life cycle, you can check the [AI Impact analytics](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/), from where you can see how the adoption of GitLab Duo Code Suggestions impacts other performance, quality and security metrics. You can visualize the last six months of AI adoption and its impact on other metrics, such as cycle time, lead time, deployment frequency, change failure rate, and critical vulnerabilities over time.\n\nAI impact analytics help to measure adoption, effectiveness and benefits that AI brings to teams and organizations and also to identify areas for improvement.\n\n## Why use GitLab Flow?\nGitLab Flow is a prescribed approach, practiced by our customers and users worldwide, that can provide the following benefits: \n- Higher productivity via the automation capabilities provided by GitLab and its single user interface and data model, all leveraged by GitLab Flow\n- Accurate insights into the end-to-end DevSecOps lifecycle to support continuous improvement\n- Built-in dashboards and metrics that can help you optimize your applications and DevSecOps processes\n- Higher code quality and improved reliability and availability of your applications\n- Better application security through built-in security scanners and capabilities\n- Compliance- and audit-readiness via built-in compliance features\n- Shorter cycle times that can help you increase deployment frequency\n- Continuous review enabled by the GitLab Flow inner feedback loop\n- The GitLab Flow inner feedback loop can 
help you optimize application updates leading to better code quality and higher reliability and availability of your applications\n- The GitLab Flow outer feedback loop can help you improve your applications as well as the development lifecycle itself\n- High levels of collaboration among stakeholders in your organization\n- Shifting security left to help find vulnerabilities in applications before they make it to production to avoid costly, unscheduled outages\n- Lower risk when deploying to production via the advanced deployment techniques and progressive delivery approaches supported by GitLab\n- AI-powered capabilities that span across the entire development lifecycle and can boost productivity, code quality, continuous improvement, security and compliance, and more\n- Support for cloud-native and non-cloud-native applications\n- Multi-cloud support for hybrid/multi-cloud applications\n- Shifting security left to help you find vulnerabilities in your applications before they make it to production so that you can avoid costly unscheduled outages\n\nHow can you get started with GitLab Flow? Leveraging GitLab Auto DevOps or parts of it is a good starting point for applying GitLab Flow principles to your application development lifecycle.\n\n## GitLab Flow and Auto DevOps\n\n![Auto DevOps - an instantiation of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/ado-pipeline.png)\n\u003Ccenter>Auto DevOps - an instantiation of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\n[Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) applies GitLab Flow throughout all its stages and jobs. You can think of it as a good example for the instantiation of GitLab Flow.\n\nAuto DevOps is a collection of predefined, out-of-the-box CI/CD templates that auto-discover the source code you have. 
Based on best practices, these templates automatically detect, build, test, deploy, and monitor your applications.\n\nThe Auto DevOps pipeline shifts work left to find and prevent defects as early as possible in the software delivery process. The pipeline then deploys the application to staging for verification and then to production in an incremental/timed fashion.\n\nAuto DevOps gets you started quickly, increasing developer productivity, and it can be easily customized to your needs, with support for the most common programming frameworks and languages. Auto DevOps is modular, customizable, and extensible, which allows you to leverage pieces of it in your pipelines or apply all of it for your application.\n\n## Get started\n[Combine GitLab Flow and GitLab Duo today](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com%2Fblog%2F) to achieve significant improvements in end-to-end workflow efficiency that can lead to even higher levels of productivity, deployment frequency, code quality and overall security, and production resiliency and availability. 
\n\nIf you'd like to see a workflow in action that combines GitLab Flow and GitLab Duo and how it can benefit you, watch the following video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/CKrZ4_tKY4I?si=Kf6QsYFIzKkJZpJd\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[109,9,716],"2024-06-18",{"slug":1994,"featured":6,"template":693},"gitlab-flow-duo","content:en-us:blog:gitlab-flow-duo.yml","Gitlab Flow Duo","en-us/blog/gitlab-flow-duo.yml","en-us/blog/gitlab-flow-duo",{"_path":2000,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2001,"content":2007,"config":2015,"_id":2017,"_type":14,"title":2018,"_source":16,"_file":2019,"_stem":2020,"_extension":19},"/en-us/blog/gitlab-global-devsecops-ai-report",{"title":2002,"description":2003,"ogTitle":2002,"ogDescription":2003,"noIndex":6,"ogImage":2004,"ogUrl":2005,"ogSiteName":706,"ogType":707,"canonicalUrls":2005,"schema":2006},"GitLab DevSecOps AI Report: A new software development era","Our survey found that DevSecOps teams are optimistic about AI, but privacy, security, and training emerged as key challenges to successful AI adoption.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663908/Blog/Hero%20Images/2023-devsecops-report-blog-banner2.png","https://about.gitlab.com/blog/gitlab-global-devsecops-ai-report","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Global DevSecOps AI Report: Ushering in a new era of software development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ashley Kramer\"}],\n        \"datePublished\": \"2023-09-12\",\n      }",{"title":2008,"description":2003,"authors":2009,"heroImage":2004,"date":2011,"body":2012,"category":10,"tags":2013},"GitLab Global DevSecOps AI Report: Ushering in a new era of software development",[2010],"Ashley 
Kramer","2023-09-12","\nAI has taken the world by storm, creating a tectonic shift across industries and society as a whole. With it, important discussions have emerged about its beneficial applications and the potential for negative repercussions, especially in the field of software development. AI is already changing how software is designed, built, secured, and deployed. With all of the industry buzz around the technology, it’s more important than ever to separate the hype from reality. \n\nThat’s why I’m pleased to share that GitLab has recently released its [Global DevSecOps Report: The State of AI in Software Development](https://about.gitlab.com/developer-survey/#ai). With this survey, we had one goal in mind: to uncover whether AI is living up to its promise. We surveyed more than 1,000 global senior technology executives, developers, and security and operations professionals to understand how organizations use AI in software development today, and what they hope to achieve with it in the future.\n\nMany of the sentiments reflected in the report echo what I hear firsthand from customers across industries and across the world: An eagerness to harness the benefits of AI for business innovation, while remaining cautious of potential risks. \n\nPerhaps unsurprisingly, privacy and security, productivity, and training emerged as key challenges to successful AI adoption. Organizations recognize that the policies and strategies put into place now, and the ways in which we shift our workflows to incorporate AI, will shape the future of software development. \n\nLet’s dive into a few of the key findings.\n\n> Download the full [Global DevSecOps Report: The State of AI in Software Development](https://about.gitlab.com/developer-survey/#ai).\n\n## As the excitement around AI increases, so do security concerns\nSecurity and privacy are top concerns, but they don’t detract from the urgency to implement AI. 
While 83% of those surveyed said that implementing AI in their software development processes is essential to avoid falling behind, 79% noted that they are concerned about AI tools having access to private information or intellectual property.\n\nThe top concern, by far, is the potential for sensitive information such as customer data being exposed (72%), and nearly half of respondents (48%) said they were concerned that trade secrets may be exposed. \n\nSimilarly, when it comes to the biggest concerns around introducing AI into the software development lifecycle, 48% said that AI-generated code may not be subject to the same copyright protection as human-generated code.\n\nGiven these results, it’s not surprising that 95% of senior technology executives said they prioritize privacy and protection of intellectual property when selecting an AI tool.\n\nTo safely benefit from AI, organizations can avoid pitfalls, including data leakage and security vulnerabilities, by first deploying it in a low-risk environment in their organization. This enables teams to learn by trial and error and build best practices before allowing additional teams to adopt AI, ensuring it scales safely and sustainably. \n\n## AI is poised to increase the productivity of some teams — and increase the workload of others\nDifferent business functions have different goals and use cases for AI, highlighting conflicting areas of opportunity and concern. \n\nOur survey findings show that 40% of security practitioners are worried that AI-powered code generation will increase their workload. However, code generation is just one of many areas where AI can add value. In our survey, developers told us that they spend only 25% of their total time writing code. The rest is spent improving existing code, understanding code, testing and maintaining code, and identifying and mitigating security vulnerabilities. 
Organizations stand to see major productivity and collaboration benefits by applying AI across the software development lifecycle.\n\t\t\t\nApproximately 50% of respondents expressed interest in AI-powered use cases across the software development lifecycle beyond code generation. In other words, there’s a strong appetite for more — and more integrated — AI spanning the breadth of the software development lifecycle. \n\n## Companies and employees are at odds over how to bridge the AI skills gap \nWhile organizations reported optimism about their company’s use of AI, our survey shows a discrepancy between organizations’ and practitioners’ satisfaction with AI training resources. \n\nDespite 75% of respondents saying their organization provides training and resources for using AI, a roughly equal proportion also said they are finding resources on their own, suggesting that the available resources and training within organizations may be insufficient. \n\nWith AI introducing a new set of skills to learn, 34% of respondents said they need training to use or interpret AI, and developers were significantly more likely to lack confidence in AI-generated output than either security or operations respondents (38% compared to 28% and 28%, respectively). \n\nOrganizations should focus on providing AI training and resources to all job roles and functional areas that will be using AI, and it is especially important to ensure that the resources for development teams are relevant, up to date, and cover the latest AI technologies and applications.\n\n## But wait, there’s more\nThese findings reinforce that for organizations to benefit from AI, it needs to be secure and delivered in a single application that is embedded across the entire software development lifecycle. 
\n\nThese core tenets guide our vision for the future of the GitLab AI-powered DevSecOps platform and [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI capabilities that enables organizations to boost efficiency, productivity, and collaboration. Only by adopting a privacy-first, integrated approach to implementing AI can organizations be confident that their intellectual property is safe while empowering everyone to deliver better, more secure software faster. \n\nTo explore the full report, [download the Global DevSecOps Report: The State of AI in Software Development](https://about.gitlab.com/developer-survey/#ai).\n",[716,9,2014],"developer survey",{"slug":2016,"featured":6,"template":693},"gitlab-global-devsecops-ai-report","content:en-us:blog:gitlab-global-devsecops-ai-report.yml","Gitlab Global Devsecops Ai Report","en-us/blog/gitlab-global-devsecops-ai-report.yml","en-us/blog/gitlab-global-devsecops-ai-report",{"_path":2022,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2023,"content":2028,"config":2033,"_id":2035,"_type":14,"title":2036,"_source":16,"_file":2037,"_stem":2038,"_extension":19},"/en-us/blog/gitlab-jetbrains-neovim-plugins",{"title":2024,"description":2025,"ogTitle":2024,"ogDescription":2025,"noIndex":6,"ogImage":868,"ogUrl":2026,"ogSiteName":706,"ogType":707,"canonicalUrls":2026,"schema":2027},"GitLab plugins for JetBrains and Neovim now available in Beta","GitLab plugins for JetBrains IDEs and Neovim are now available in Beta, bringing GitLab Duo Code Suggestions to more software development environments.","https://about.gitlab.com/blog/gitlab-jetbrains-neovim-plugins","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab plugins for JetBrains and Neovim now available in Beta\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kai Armstrong\"}],\n        \"datePublished\": \"2023-07-25\",\n      
}",{"title":2024,"description":2025,"authors":2029,"heroImage":868,"date":2030,"body":2031,"category":10,"tags":2032},[1493],"2023-07-25","\n\n_This blog post is the latest in an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). Start with the first blog post: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab._\n\nIn June, we shared our plans to [extend AI-powered code suggestions](/blog/extending-code-suggestions/) to more IDEs, thereby continuing to help enhance developer productivity. A few weeks ago, we [announced](/blog/gitlab-visual-studio-extension/) the availability of our [extension for Visual Studio](https://marketplace.visualstudio.com/items?itemName=GitLab.GitLabExtensionForVisualStudio). Now, we're adding support for JetBrains and Neovim with official plugins to further extend the reach of GitLab Duo Code Suggestions and help enhance developer productivity across even more development environments.\n\nThese new GitLab plugins for both JetBrains and Neovim support [GitLab Duo](https://about.gitlab.com/gitlab-duo/) Code Suggestions for both [GitLab SaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas) and [GitLab self-managed](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\n## GitLab for JetBrains IDEs\nYou can download the GitLab for JetBrains plugin from the [JetBrains Plugin Marketplace](https://plugins.jetbrains.com/plugin/22325-gitlab) or from directly within your IDE by visiting `Settings` -> `Plugins` and then searching for `GitLab`. 
Once you've installed the plugin, follow the [setup instructions](https://gitlab.com/gitlab-org/editor-extensions/gitlab-jetbrains-plugin#setup) to configure authentication and get started.\n\n![JetBrains GitLab Duo Code Suggestions Settings](https://about.gitlab.com/images/blogimages/jetbrains-code-suggestions-settings.png)\n\nYou can verify if the extension is connected and working by checking the status bar icon. If everything looks good, you're ready to start receiving code suggestions as you work. Just start typing and GitLab Duo will automatically provide you suggestions inline. Press `Tab` to accept the suggestions or keep typing to receive new suggestions.\n\n![JetBrains GitLab Duo Code Suggestions](https://about.gitlab.com/images/blogimages/jetbrains-code-suggestions-ghost-text.png)\n\nWe look forward to hearing from you about this initial release! You can provide feedback or report any issues you're having in our [feedback issue](https://gitlab.com/gitlab-org/editor-extensions/gitlab-jetbrains-plugin/-/issues/38).\n\n## GitLab for Neovim\nThe [GitLab for Neovim plugin](https://gitlab.com/gitlab-org/editor-extensions/gitlab.vim) can be found on GitLab and you can [follow the instructions to get started](https://gitlab.com/gitlab-org/editor-extensions/gitlab.vim#getting-started). Once you've downloaded the plugin, there are [configuration options](https://gitlab.com/gitlab-org/editor-extensions/gitlab.vim#configuration) available to help customize your experience.\n\nOnce you've configured the plugin you'll be able to receive suggestions directly within the UI. Check out our demo video below to learn more about the plugin.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/PRSPQvbFquU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWe're continuing to iterate on our Neovim plugin. 
You can provide feedback or report any issues you're having in our [feedback issue](https://gitlab.com/gitlab-org/editor-extensions/gitlab.vim/-/issues/22).\n\n## Iterating on AI/ML features\nThese new additions to our family of editor extensions join our existing extensions for [Visual Studio](https://marketplace.visualstudio.com/items?itemName=GitLab.GitLabExtensionForVisualStudio) and [Visual Studio Code](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow). We plan to continue iterating to make the GitLab Duo Code Suggestions experience even better.\n\nWe're also continuing our work on a [GitLab Language Server for Code Suggestions](https://gitlab.com/gitlab-org/editor-extensions/gitlab-language-server-for-code-suggestions), which will allow us to standardize and iterate faster on our IDE extensions, and will enable users of IDEs and code editors to use GitLab Duo Code suggestions even if an official extension isn't available. We look forward to providing more documentation and working with the community on this project in the future.\n\nThese efforts are just the start of how we're incorporating GitLab Duo capabilities throughout the [software development lifecycle](/blog/what-the-ml-ai/) to help GitLab users become more efficient and effective. As we continue to identify painful and time-consuming tasks that are ideal for AI-assisted features, we'll continue to share updates, tutorials, and demos through this blog series.\n\nCheckout GitLabs 16.2 release to see what's new with [Code Suggestions](https://about.gitlab.com/releases/2023/07/22/gitlab-16-2-released/#gitlab-duo-code-suggestions-improvements-powered-by-google-ai).\n\nInterested in using GitLab Duo features? 
[Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and we'll keep you updated.\n\nContinue reading our \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[716,738,717,9],{"slug":2034,"featured":6,"template":693},"gitlab-jetbrains-neovim-plugins","content:en-us:blog:gitlab-jetbrains-neovim-plugins.yml","Gitlab Jetbrains Neovim Plugins","en-us/blog/gitlab-jetbrains-neovim-plugins.yml","en-us/blog/gitlab-jetbrains-neovim-plugins",{"_path":2040,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2041,"content":2046,"config":2052,"_id":2054,"_type":14,"title":2055,"_source":16,"_file":2056,"_stem":2057,"_extension":19},"/en-us/blog/gitlab-named-a-leader-in-2024-gartner-magic-quadrant-for-ai-code-assistants",{"title":2042,"description":2043,"ogTitle":2042,"ogDescription":2043,"noIndex":6,"ogImage":892,"ogUrl":2044,"ogSiteName":706,"ogType":707,"canonicalUrls":2044,"schema":2045},"GitLab named a Leader in 2024 Gartner Magic Quadrant for AI Code Assistants","In the first Gartner® Magic Quadrant™ for this category, GitLab is recognized for its ability to execute and completeness of vision in AI code assistant technology.","https://about.gitlab.com/blog/gitlab-named-a-leader-in-2024-gartner-magic-quadrant-for-ai-code-assistants","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab named a Leader in 2024 Gartner Magic Quadrant for AI Code Assistants\",\n        
\"author\": [{\"@type\":\"Person\",\"name\":\"Dave Steer\"}],\n        \"datePublished\": \"2024-08-22\",\n      }",{"title":2042,"description":2043,"authors":2047,"heroImage":892,"date":2048,"body":2049,"category":10,"tags":2050},[1873],"2024-08-22","We’re thrilled to announce that GitLab has been recognized as a Leader in the [Gartner® Magic Quadrant™ for AI Code Assistants](https://about.gitlab.com/gartner-mq-ai-code-assistants/) — the first-ever year of this category. We feel this is an important recognition and we believe it highlights our commitment to delivering AI-powered capabilities that accelerate software delivery, enhance security, and drive innovation for our customers. \n\nAI code assistants go beyond just code generation and completion. They're collaborative partners that boost developer efficiency by improving code quality and continuous learning. By automating routine tasks and providing intelligent suggestions, assistants like GitLab Duo — our suite of AI-powered features — free up developer time to focus on higher-level problem-solving. \n\n![Gartner MQ AI Code Assistants image](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675964/Blog/Content%20Images/AI_Code_Assistants_MQ_graphic__1_.png)\n\n> Download the [2024 Gartner® Magic Quadrant™ for AI Code Assistants report](https://about.gitlab.com/gartner-mq-ai-code-assistants/).\n\n## AI code assistants: Speed, security, and seamless integration\n\nAI code assistants are integral to organizations of all sizes, helping DevSecOps teams develop and deploy secure software faster. However, the true value of AI emerges when it’s integrated across the entire software development lifecycle. 
Unlike limited AI point solutions, which can lead to fragmented toolchains and data silos, GitLab’s comprehensive platform embeds AI from planning through production, offering holistic visibility and insights via metrics and dashboards.\n\n## The power of GitLab Duo\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/) is a comprehensive toolbox of AI capabilities designed to improve the developer experience, shift security left in the development cycle, and strengthen collaboration across Dev, Sec, and Ops teams. Key features include: \n\n* Code Suggestions for code generation and code completion\n* Chat for context-aware, in-app assistance on code explanation, code refactoring, and test generation\n* Vulnerability Explanation to better understand vulnerabilities in code\n* Vulnerability Resolution to help mitigate found vulnerabilities\n* Root Cause Analysis to troubleshoot pipeline issues\n* AI Impact Analytics Dashboard to gain real-time insights and evaluate an organization's AI ROI\n\n## Maximizing ROI with AI \n\nBusiness and engineering leaders need visibility into how AI is being used across the software development lifecycle to assess the ROI of their technology investments. GitLab's [AI Impact Analytics Dashboard](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/) provides that visibility as well as metrics to gauge AI adoption rates, performance improvements, and more.\n\n## Flexibility, privacy, and transparency at the forefront\n\nGitLab customers looking to explore AI-powered capabilities can use GitLab Duo to leverage the power of AI securely across an IDE of choice or a remote development workspace right out of the box, with a flexible pricing structure and a free trial. Also, the [GitLab AI Transparency Center](https://about.gitlab.com/ai-transparency-center/) provides full visibility into our governance and transparency practices. 
\n\nSoon, organizations will be able to [tailor their AI experience](https://about.gitlab.com/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development/) to their strategic and regulatory requirements with model personalization and self-hosted model deployment. Model personalization will allow enterprises to customize GitLab Duo and tap into AI’s full potential in close alignment with their business goals, operational needs, and customer expectations. Self-hosted model deployment ensures that data does not leave an organization's secure environment, reducing the risk of breaches and ensuring compliance for highly regulated industries. \n\n## Leading the future of AI in DevSecOps\n\nGitLab is your partner in AI-driven software development. We equip teams with the tools to build, secure, and deploy software faster. Our commitment to innovation ensures you're always at the forefront of AI advancements. Stay tuned for exciting updates on our roadmap as we continue to revolutionize DevSecOps.\n\n> [Download the 2024 Gartner® Magic Quadrant™ for AI Code Assistants report](https://about.gitlab.com/gartner-mq-ai-code-assistants/).\n\n***Source: Gartner, Magic Quadrant for AI Code Assistants, Arun Batchu, Haritha Khandabattu, Philip Walsh, Matt Brasier, August 2024***\n\n***GARTNER is a registered trademark and service mark of Gartner, Inc. and/or its affiliates in the U.S. and internationally, and MAGIC QUADRANT is a registered trademark of Gartner, Inc. and/or its affiliates and are used herein with permission. All rights reserved.***\n\n***Gartner does not endorse any vendor, product or service depicted in its research publications, and does not advise technology users to select only\nthose vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner’s research\norganization and should not be construed as statements of fact. 
Gartner disclaims all warranties, expressed or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose.***\n\n***This graphic was published by Gartner Inc. as part of a larger report and should be evaluated in the context of the entire document. The Gartner\ndocument is available upon request from Gartner B.V.***\n",[1181,9,716,2051],"research",{"slug":2053,"featured":91,"template":693},"gitlab-named-a-leader-in-2024-gartner-magic-quadrant-for-ai-code-assistants","content:en-us:blog:gitlab-named-a-leader-in-2024-gartner-magic-quadrant-for-ai-code-assistants.yml","Gitlab Named A Leader In 2024 Gartner Magic Quadrant For Ai Code Assistants","en-us/blog/gitlab-named-a-leader-in-2024-gartner-magic-quadrant-for-ai-code-assistants.yml","en-us/blog/gitlab-named-a-leader-in-2024-gartner-magic-quadrant-for-ai-code-assistants",{"_path":2059,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2060,"content":2066,"config":2071,"_id":2073,"_type":14,"title":2074,"_source":16,"_file":2075,"_stem":2076,"_extension":19},"/en-us/blog/gitlab-premium-with-duo",{"title":2061,"description":2062,"ogTitle":2061,"ogDescription":2062,"noIndex":6,"ogImage":2063,"ogUrl":2064,"ogSiteName":706,"ogType":707,"canonicalUrls":2064,"schema":2065},"Unlocking AI for every GitLab Premium and Ultimate customer","GitLab Premium and Ultimate now include GitLab Duo essentials for creating and understanding code throughout the software development lifecycle, all at no additional cost.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660188/Blog/Hero%20Images/blog-premium-with-duo-cover-0756-fy26-v2-1800x945.png","https://about.gitlab.com/blog/gitlab-premium-with-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Unlocking AI for every GitLab Premium and Ultimate customer\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David 
DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2025-05-15\",\n      }",{"title":2061,"description":2062,"authors":2067,"heroImage":2063,"date":2068,"body":2069,"category":738,"tags":2070},[1549],"2025-05-15","Today, we launch GitLab 18.0, which highlights our latest innovations and plans in core DevSecOps workflows, security and compliance, and AI. __As part of this release, we're excited to announce that GitLab Premium and Ultimate now include essential GitLab Duo AI capabilities at no additional cost.__ All Premium and Ultimate customers will have immediate access to GitLab Duo Code Suggestions and Chat directly in their preferred supported source code editors and IDEs.\n\n## AI for every development team\n\nArtificial intelligence is now at the center of the developer experience. AI enhances coding in many ways: It analyzes your codebase and provides real-time suggestions as you type, creates functions and methods based on your project's context, reduces repetitive tasks, and automates code reviews.\n\nOver the past few years, we've built [GitLab Duo](https://about.gitlab.com/gitlab-duo/) to infuse generative and agentic AI capabilities like these into our platform. Because writing code is just the start of the software lifecycle – our [global DevSecOps study](https://about.gitlab.com/developer-survey/) found that developers spend 79% of their time on tasks other than code creation – we have adopted a strategy to integrate AI throughout the entire software development lifecycle. \n\nNow, we’re excited to take the next step forward by including essential GitLab Duo capabilities in our GitLab Premium and Ultimate tiers, enabling developers to get the benefits of AI at no additional cost.\n\nBy including GitLab Duo Chat and Duo Code Suggestions in Premium and Ultimate, every software engineer can accelerate their workflow within the IDE — without requiring separate tooling, licensing, or governance. 
All existing Premium and Ultimate customers now have instant access to Duo Chat and Code Suggestions, once they upgrade to GitLab 18.0, and this enhancement becomes standard for all new customers.\n\n> **\"GitLab has already been instrumental in eliminating our reliance on a fragmented toolchain, which cut costs from disconnected solutions, and streamlined our workflow. Enhancing GitLab Premium with Duo will give us even greater efficiency and cost savings as our developers spend less time on routine coding tasks and more time tackling complex challenges that drive real business value.”**\n>\n>- Andrei Nita, Chief Technology Officer at McKenzie Intelligence Services\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1083723619?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab Premium with Duo Core\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n\u003Cbr>\u003C/br>\nPremium and Ultimate customers now have these AI-native capabilities:\n\n#### GitLab Duo Code Suggestions\n\n* Generate complete functions and code blocks from comments  \n* Get intelligent code completions as you type  \n* Support for 20+ programming languages  \n* Available in most popular IDEs\n\nTake this interactive tour to learn about GitLab Duo Code Suggestions (click on the image to start the tour).\n\n\u003Ca href=\"https://gitlab.navattic.com/code-suggestions\">\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175911/Blog/b5gdnls7jdyrpeyjby5j.png\" alt=\"GitLab Duo Code Suggestions cover image\">\u003C/a>\n\nLearn more in our [Duo Code Suggestions documentation](https://docs.gitlab.com/user/project/repository/code_suggestions/).\n\n#### GitLab Duo Chat\n\n* 
Explain unfamiliar code to understand complex functionality  \n* Refactor existing code to improve quality and maintainability  \n* Generate comprehensive test cases to help catch bugs earlier  \n* Fix code issues directly in your workflow\n\n![Duo Chat - API endpoint explanation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673912/Blog/Content%20Images/Duo_Chat_-_gif_-_API_endpoint_explanation__3_.gif)\n\nLearn more in our [Duo Chat documentation](https://docs.gitlab.com/user/gitlab_duo_chat/).\n\n> **\"For us, as GitLab users, Duo's intelligent code suggestions have become a daily asset for our developers. Combined with the chat feature, it allows for immediate feedback and iteration, resulting in faster development cycles and a more secure codebase. It's a seamless and powerful addition to our workflows.\"**\n>\n>- Felix Kortmann, Chief Technology Officer, Ignite by FORVIA HELLA\n\n## Duo Enterprise now available to GitLab Premium customers\n\nDue to strong customer demand, we're also excited to share that [GitLab Premium](https://about.gitlab.com/pricing/premium/) customers now can purchase Duo Enterprise, our full suite of AI offerings, without needing to upgrade to GitLab Ultimate. Premium customers can enjoy a rich AI experience seamlessly integrated across the software development lifecycle. This includes exciting GitLab Duo capabilities like:\n\n* [Root Cause Analysis](https://docs.gitlab.com/user/gitlab_duo/use_cases/#root-cause-analysis-use-cases) helps resolve CI/CD pipeline failures quickly, ensuring your CI/CD pipelines remain green.  \n* [Code Review](https://docs.gitlab.com/user/project/merge_requests/duo_in_merge_requests/#have-gitlab-duo-review-your-code) enables faster merge request reviews by leveraging Duo as a code reviewer.  \n* [Advanced Chat](https://docs.gitlab.com/user/gitlab_duo_chat/) summarizes conversations, helps understand code changes, and provides advanced configuration assistance.  
\n* [Self-Hosted](https://docs.gitlab.com/administration/gitlab_duo_self_hosted/) enables Duo to be leveraged within air-gapped and offline environments by hosting approved AI models for Duo to use.\n\nIn addition to Duo Enterprise availability, we continue to invest in the success of GitLab Premium customers. Since the launch of GitLab 17, [we’ve shipped more than a hundred features and improvements](https://gitlab.com/gitlab-org/gitlab/-/releases), including: \n\n* [**CI/CD Catalog**](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/) enables developers to share, discover, and reuse   \npre-existing CI/CD components and configurations.  \n* [**Artifact registry**](https://docs.gitlab.com/user/packages/virtual_registry/) gives developers secure access to artifacts and seamless integration with CI/CD pipelines.  \n* [**Remote development**](https://docs.gitlab.com/user/project/remote_development/) enables developers to work in on-demand,  \ncloud-based development environments.\n\n> [Learn more about GitLab Premium features.](https://about.gitlab.com/pricing/premium/#wp-premium-features)\n\n## GitLab Duo: AI that meets organizations where they are\n\nGitLab customers have a comprehensive menu of Duo offerings, across our Pro and Enterprise solutions, to meet you where you are in the AI adoption cycle – the further along your teams are, the more capabilities you can use to build, test, and deploy secure software faster.\n\n![Key features in Duo plans](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673912/Blog/Content%20Images/Screenshot_2025-05-14_at_8.50.34_AM.png)\n\n## How current GitLab Ultimate and Premium customers can get started with Duo\n\nStarting with GitLab 18.0, for existing Ultimate and Premium customers, Duo Code Suggestions and Chat features will be off by default but can easily be enabled – learn how below.\n\nTo start experiencing GitLab Premium and Ultimate with Duo: \n\n1. 
Ensure you're on GitLab Premium or Ultimate. If not, you can start a free trial. \n\n2. Enable GitLab Duo in your organization settings.\n\n3. If using a local IDE, install the appropriate GitLab [Editor Extension](https://docs.gitlab.com/editor_extensions/#available-extensions). \n\n4. Start using Code Suggestions and Chat in your preferred supported local IDE or the GitLab Web IDE.\n\n**Note:** For new customers and trials, GitLab's AI capabilities will be enabled automatically.\n\n## AI-native development requires a DevSecOps platform\n\nAI is fundamentally reshaping the developer experience. Organizations won't just have more people building software. They'll have more production-ready code generated by AI – **making GitLab more essential than ever.** \n\nWe built GitLab Premium and Ultimate with Duo specifically for this new reality, giving teams one secure foundation for all their code. As AI generates code across your organization, GitLab becomes your control center: no separate tools for security scanning, compliance checks, or managing pipelines. Just a single, unified platform that scales with your organization and helps ensure all code meets your standards before reaching production. As AI accelerates your development, GitLab enables you to maintain control, security, and quality from end to end.\n\n> To learn more about GitLab Duo and all the ways it can transform how your team works, [visit our GitLab Premium page](https://about.gitlab.com/pricing/premium/) or if you are a GitLab customer, reach out to your GitLab representative to schedule a demo. 
Finally, we invite you to join us on June 24, 2025, for our [GitLab 18 virtual launch event](https://about.gitlab.com/eighteen/) to learn about the future of AI-native software development.\n",[9,496,1181,717,738],{"slug":2072,"featured":91,"template":693},"gitlab-premium-with-duo","content:en-us:blog:gitlab-premium-with-duo.yml","Gitlab Premium With Duo","en-us/blog/gitlab-premium-with-duo.yml","en-us/blog/gitlab-premium-with-duo",{"_path":2078,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2079,"content":2085,"config":2091,"_id":2093,"_type":14,"title":2094,"_source":16,"_file":2095,"_stem":2096,"_extension":19},"/en-us/blog/gitlab-suggested-reviewers",{"title":2080,"description":2081,"ogTitle":2080,"ogDescription":2081,"noIndex":6,"ogImage":2082,"ogUrl":2083,"ogSiteName":706,"ogType":707,"canonicalUrls":2083,"schema":2084},"Unblock code reviews with GitLab Suggested Reviewers","Identify the right reviewers more quickly, saving time and accelerating the software development lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666316/Blog/Hero%20Images/codereview2.png","https://about.gitlab.com/blog/gitlab-suggested-reviewers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Unblock code reviews with GitLab Suggested Reviewers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-09-21\",\n      }",{"title":2080,"description":2081,"authors":2086,"heroImage":2082,"date":2087,"body":2088,"category":10,"tags":2089},[774],"2023-09-21","\nIn the world of software development, speed is of the essence. The faster you can merge your code, the quicker you can iterate, innovate, and deliver value to your users. 
However, there's a roadblock that often hinders this need for speed – the arduous process of finding the right reviewer for your merge request (MR).\n\nImagine this: You've just finished a brilliant piece of code that's ready to be integrated into your project. You're excited to see it in action, and so are your teammates. But before your masterpiece can join the project, it must go through the crucial stage of code review. Here's where the challenge begins.\n\nIn many development teams, finding the ideal reviewer can be a time-consuming task. You might need someone with expertise in a particular area, someone who's available at the moment, and someone who can provide insightful feedback promptly. The longer it takes to locate the right reviewer, the more your code languishes in review limbo, delaying the entire development cycle.\n\nAnd let's not forget the frustration of starting a review with the wrong person, only to realize that their insights aren't quite what you needed. Backtracking to find another reviewer can be a difficult process, compounding the delays and creating unnecessary bottlenecks.\n\nThis challenge becomes even more pronounced as software projects grow in size and complexity, especially when multiple teams are involved. Coordinating reviews across team boundaries can be a logistical nightmare. \n\nWhat's worse is that these delays often set off a chain reaction, leading to even more delays down the line, as other tasks and dependencies pile up, affecting not just the current MR but the entire project's timeline.\n\n![development graphic](https://about.gitlab.com/images/blogimages/2023-09-22-suggested-reviewers/code-review.png)\n\nIn this blog post, we'll introduce you to [GitLab Suggested Reviewers](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/#suggested-reviewers). Suggested Reviewers is designed to streamline your MR workflow by ensuring that you connect with the right reviewers quickly and efficiently. 
Say goodbye to wasted time and welcome to a faster, more agile development cycle.\n\n## What is Suggested Reviewers?\nSuggested Reviewers leverages a machine learning algorithm that analyzes the changes in an MR and a project’s contribution graph to suggest reviewers with contextual knowledge. It generates a list of up to five suggested reviewers. The list is presented in the Reviewers dropdown in the merge request sidebar, empowering the author to immediately select available reviewers. These suggestions are contextual to the changes in the MR. Additional commits to MRs may change the reviewer suggestions, which are automatically updated in the reviewer dropdown list.\n\nIt also will display if a suggestion is a code owner or if they can merge the changes, helping drive towards action.\n\n![suggested reviewers](https://about.gitlab.com/images/blogimages/2023-09-22-suggested-reviewers/suggested-reviewers.png) \n\n## Key benefits of Suggested Reviewers\nHere are the key benefits of Suggested Reviewers.\n- **Time savings.** Suggested Reviewers eliminates the need for guesswork or manual reviewer selection. The feature automates the process, saving time for developers.\n- **Enhanced collaboration.** By connecting you with the right reviewers, Suggested Reviewers promotes collaboration and encourages knowledge sharing among team members.\n- **Improved code quality.** Suggested Reviewers enables developers to have the most qualified individuals review their code, resulting in better quality and more reliable software.\n- **Reduced bottlenecks.** Suggested Reviewers speeds up the review process and reduces bottlenecks by quickly identifying available and willing reviewers.\n- **Personalized suggestions.** Suggested Reviewers considers individual expertise and past interactions, providing tailored recommendations.\n\n## How to get started with Suggested Reviewers\n\nIn this blog post, we've explored how Suggested Reviewers transforms the development lifecycle. 
It's not just about faster code reviews; it's about faster, more efficient, and more collaborative software development from start to finish. \n\n> Learn more in our [Suggested Reviewers documentation](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/#suggested-reviewers).\n",[834,2090,9],"collaboration",{"slug":2092,"featured":6,"template":693},"gitlab-suggested-reviewers","content:en-us:blog:gitlab-suggested-reviewers.yml","Gitlab Suggested Reviewers","en-us/blog/gitlab-suggested-reviewers.yml","en-us/blog/gitlab-suggested-reviewers",{"_path":2098,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2099,"content":2104,"config":2109,"_id":2111,"_type":14,"title":2112,"_source":16,"_file":2113,"_stem":2114,"_extension":19},"/en-us/blog/gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops",{"title":2100,"description":2101,"ogTitle":2100,"ogDescription":2101,"noIndex":6,"ogImage":2004,"ogUrl":2102,"ogSiteName":706,"ogType":707,"canonicalUrls":2102,"schema":2103},"GitLab survey highlights wins, challenges as orgs adopt DevSecOps","This year’s survey findings show that DevSecOps principles, together with a DevSecOps platform, help organizations ship more secure software, faster.","https://about.gitlab.com/blog/gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab survey highlights wins, challenges as orgs adopt DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2023-04-20\",\n      }",{"title":2100,"description":2101,"authors":2105,"heroImage":2004,"date":2106,"body":2107,"category":1201,"tags":2108},[1549],"2023-04-20","\nSecurity is everyone’s responsibility. And when everyone works together and has access to the same tools, you don’t have to sacrifice performance, efficiency, or security. 
That's the message from the respondents of our recent survey of software developers, IT operations, and IT security professionals worldwide. Where there is unity among Development, Security, and Operations in the common goal of securing the software supply chain, there is success.\n\nOur first report from the survey, [Security Without Sacrifices](https://about.gitlab.com/developer-survey/), focuses on this throughline and illuminates where [DevSecOps](/topics/devsecops/) professionals feel positive about their efforts to secure the software development lifecycle and where they feel work still needs to be done. While the results are not surprising — they align with what I hear from customers every day — they reinforce GitLab’s belief that DevSecOps principles, coupled with a DevSecOps platform, help organizations ship more secure software, faster.\n\nFor instance, in last year’s report, a majority of development, security, and operations professionals said they felt individually responsible for security. This year, 53% of respondents said they are responsible for application security *as part of a larger team*. And 71% of security professionals said at least a quarter of all security vulnerabilities are being spotted by developers, up from 53% in 2022.\n\nWhat this tells us is that security is indeed making its way deep into the software development lifecycle and as more innovation is introduced into the daily workflow, including AI-assisted capabilities, the benefits are tangible.\n\nHere’s what the report findings suggest organizations should keep in mind so they can get the most out of DevSecOps.\n\n## AI is now inseparable from DevSecOps\nFor the past several years, we’ve seen AI become more and more established in software development workflows. In this year’s report, nearly two-thirds (65%) of developers said they are using AI in testing efforts or will be in the next three years. 
We also saw an uptick this year in the number of developers who are using AI to check code.\n\nAI represents a tectonic shift in the market that will have profound effects on how organizations deliver value to customers. To take full advantage of AI, it will be critical for organizations to apply AI-assisted workflows across the entire software development lifecycle and make them available to all personas — not just developers but everyone involved in the delivery of software value, from security and compliance teams to product development and marketing.\n\n## Security toolchain expansion is unsustainable\nThis year’s report showed that toolchain sprawl may be a bigger concern for security professionals than for the rest of the team; 57% of security respondents said they use six or more tools, compared to 48% of developers and 50% of operations professionals. We’re also seeing signs that security professionals are using _more_ tools than in past years. This is in line with what security practitioners tell me: They use different tools for each security function, including composition analysis, fuzzing, DAST, and dependency scanning.\n\nThe rise of DevOps and DevSecOps is making it easier for software development teams to consolidate tools, but the increased pressure around software supply chain security means this trend is not holding for security as it is for other roles. Security practitioners select the tools that get the job done and the tools they’re most comfortable with, but as security budgets tighten, that’s no longer going to be a sustainable strategy. 
We should expect to see a bigger push to consolidate security toolchains over the next several years.\n\n## Efficiency and security cannot be mutually exclusive\nThe first wave of budget tightening seems to be here already — 85% of the security professionals we surveyed told us they have the same or less budget this year than they did in 2022, and security professionals were also more likely than both developers and operations professionals to cite macroeconomic forces as a primary factor driving DevOps/DevSecOps to scale at their organizations. In this environment, organizations (and security teams) need to do more with less.\n\nFor many of the organizations I’ve talked to, tighter budgets mean more than just cutting costs. Organizations need to ensure they’re getting a swifter return on their DevSecOps investments. That return on investment could look like increased efficiency, translating into accelerated value delivery for customers, faster innovation, and more revenue. Or it could mean incorporating security and compliance tools earlier in the development lifecycle, reducing risk. Ideally, it’s all of the above. As organizations seek ways to stay ahead of the competition, security and efficiency are both non-negotiable.\n\n## A platform approach: The winning formula for DevSecOps\nHow can organizations foster collaboration, reduce toolchain friction, and boost efficiency without sacrificing security? A platform that puts DevSecOps methodologies into practice. This year’s respondents identified security and efficiency as the top two benefits of adopting a DevSecOps platform, ahead of automation, cost savings, and collaboration.\n\nA DevSecOps platform enables teams to collaborate in a single application, shortening cycle times, reducing risks, and accelerating everyone’s workflows. 
We see proof points in this year’s data: Security professionals who use a DevSecOps platform were significantly more likely than those who don’t use a platform to say developers catch more security vulnerabilities and had a higher opinion of their organization’s security efforts.\n\nIt has become important for organizations to foster collaboration and engagement to keep development, security, and operations teams happy.\n\n## Explore this year’s report\nRead the first report in our 2023 Global DevSecOps Report Series, [Security Without Sacrifices](https://about.gitlab.com/developer-survey/), and stay tuned for more reports on the data in the coming months.\n",[2014,716,691,9],{"slug":2110,"featured":6,"template":693},"gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops","content:en-us:blog:gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops.yml","Gitlab Survey Highlights Wins Challenges As Orgs Adopt Devsecops","en-us/blog/gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops.yml","en-us/blog/gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops",{"_path":2116,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2117,"content":2122,"config":2128,"_id":2130,"_type":14,"title":2131,"_source":16,"_file":2132,"_stem":2133,"_extension":19},"/en-us/blog/gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation",{"title":2118,"description":2119,"ogTitle":2118,"ogDescription":2119,"noIndex":6,"ogImage":929,"ogUrl":2120,"ogSiteName":706,"ogType":707,"canonicalUrls":2120,"schema":2121},"GitLab uses Anthropic for smart, safe AI-assisted code generation","Anthropic’s Claude AI model supports the delivery of helpful, trusted code in GitLab Duo Code Suggestions.","https://about.gitlab.com/blog/gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab uses Anthropic for smart, 
safe AI-assisted code generation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kevin Chu\"}],\n        \"datePublished\": \"2024-01-16\",\n      }",{"title":2118,"description":2119,"authors":2123,"heroImage":929,"date":2125,"body":2126,"category":10,"tags":2127},[2124],"Kevin Chu","2024-01-16","GitLab recently launched GitLab Duo Code Suggestions into general availability. Code Suggestions includes the ability to generate algorithms or code blocks directly within the developer's IDE, a capability that uses [Anthropic's](https://www.anthropic.com/) generative AI model, [Claude](https://www.anthropic.com/index/claude-2-1). Integrated into the GitLab Duo portfolio of AI-assisted features, Claude is compatible with GitLab’s principles of [transparency and privacy](https://about.gitlab.com/the-source/ai/building-a-transparency-first-ai-strategy-7-questions-to-ask-your-devops/) by design and provides a high-integrity foundation for code generation.\n\nIn this post, you'll learn the advantages of code generation and how GitLab, together with Anthropic, is leveraging AI to responsibly boost developer productivity.\n\n## How AI-assisted code generation works\n\nCode Suggestions is incredibly useful as a coding companion that shows the suggestions as a developer types. It helps save developer time and keystrokes, reducing the effort for rote tasks and giving developers time back in their day. But what if a developer wants to do even more with generative AI?\n\nEnter code generation.\n\nImagine needing to write a new complex function based on an unfamiliar algorithm, or write a large amount of boilerplate code. 
Instead of struggling through these tasks with gritted teeth, code generation allows developers to simply define what they want to do in comments or multi-line comment blocks, and then Code Suggestions generates the code from there.\n\nHere is an example of Code Suggestions generating a JavaScript function that calculates the Levenshtein distance, a string metric useful for comparing the difference between two sequences:\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175962/Blog/lkrk16unp4dcy3c4zwvw.gif\" alt=\"Code Suggestions generating JavaScript function\" width=\"100%\" height=\"auto\">\n\nHere is another example showing a multi-line comment in Python. We want Code Suggestions to generate a Tornado Web Server that does three things: log in, run a scan, and review the results. By providing the specific instructions, including details such as the framework and the components to use, Code Suggestions was able to generate a Tornado App, despite this author being unfamiliar with Tornado.\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175967/Blog/pxcdppnpzwfhgopxh999.gif\" alt=\"Code Suggestions generating Tornado app\" width=\"100%\" height=\"auto\">\n\n## Safety through focus and trustworthiness\nDevelopers expect AI coding assistants to not only be helpful, but also accurate and safe. The system should generate precisely what is asked for while limiting deviation and [hallucination](https://www.ibm.com/topics/ai-hallucinations). Customers want assurances that AI-generated code can be trusted.\n\nThroughout GitLab's evaluation of certain code generation models, Claude stood out for its ability to mitigate distracting, unsafe, or deceptive behaviors. Claude also demonstrated consistent and accurate code generation throughout our testing.\n\nGitLab's use of Anthropic's Claude enables Code Suggestions to balance automation with trust. 
Code Suggestions helps users become more efficient without sacrificing reliability — a win for augmented development.\n\n## What’s next\n\nReady to experience the future of code generation? Start your [free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/) today and unlock the power of AI-assisted development!",[9,233,716,917],{"slug":2129,"featured":91,"template":693},"gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation","content:en-us:blog:gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation.yml","Gitlab Uses Anthropic For Smart Safe Ai Assisted Code Generation","en-us/blog/gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation.yml","en-us/blog/gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation",{"_path":2135,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2136,"content":2141,"config":2146,"_id":2148,"_type":14,"title":2149,"_source":16,"_file":2150,"_stem":2151,"_extension":19},"/en-us/blog/gitlab-visual-studio-extension",{"title":2137,"description":2138,"ogTitle":2137,"ogDescription":2138,"noIndex":6,"ogImage":868,"ogUrl":2139,"ogSiteName":706,"ogType":707,"canonicalUrls":2139,"schema":2140},"GitLab for Visual Studio, including code suggestions, available in Beta","GitLab for Visual Studio is now available in Beta, bringing GitLab Duo code suggestions to Visual Studio.","https://about.gitlab.com/blog/gitlab-visual-studio-extension","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab for Visual Studio, including code suggestions, available in Beta\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kai Armstrong\"}],\n        \"datePublished\": \"2023-06-29\",\n      }",{"title":2137,"description":2138,"authors":2142,"heroImage":868,"date":2143,"body":2144,"category":10,"tags":2145},[1493],"2023-06-29","\n\n\u003Ci>This blog is the latest post in an ongoing series about GitLab's journey to \u003Ca 
href=\"/blog/ai-ml-in-devsecops-series/\">build and integrate AI/ML into our DevSecOps platform\u003C/a>. The first blog post can be found \u003Ca href=\"/blog/what-the-ml-ai/\">here\u003C/a>. Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nIn June, we shared our plans to [extend code suggestions](/blog/extending-code-suggestions/) to more IDEs, thereby continuing to enhance developer productivity. Over the past several weeks, we've been iterating quickly and we can share that GitLab for Visual Studio is available ([in Beta](https://docs.gitlab.com/ee/policy/experiment-beta-support.html#beta)) from the [Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=GitLab.GitLabExtensionForVisualStudio).\n\nThe GitLab for Visual Studio extension supports [GitLab Duo](https://about.gitlab.com/gitlab-duo/) code suggestions for both [GitLab SaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas) and [GitLab self-managed](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\nDownload it and let us know what you think and any issues you're having in our [feedback issue](https://gitlab.com/gitlab-org/editor-extensions/gitlab-visual-studio-extension/-/issues/38).\n\n## Getting started\nTo get started with the extension, download it from the [Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=GitLab.GitLabExtensionForVisualStudio). 
After the extension is downloaded and installed, you can follow our [setup instructions](https://gitlab.com/gitlab-org/editor-extensions/gitlab-visual-studio-extension/-/blob/main/README.md#setup) to get it configured.\n\n![GitLab for Visual Studio setup](https://about.gitlab.com/images/blogimages/gitlab-visual-studio-extension-setup.png)\n\n## Using the extension\nOnce you've set up the extension, make sure things are configured properly and authentication is working by checking the status bar icon.\n\n![GitLab for Visual Studio status bar icon](https://about.gitlab.com/images/blogimages/gitlab-visual-studio-extension-status-bar-icon.png)\n\nIf everything looks good, you're ready to start receiving code suggestions as you work. Just start typing and GitLab Duo will automatically provide you suggestions inline. You can press \u003Ckbd>Tab\u003C/kbd> to accept the suggestions or just keep typing to receive new suggestions.\n\n![GitLab for Visual Studio suggestions](https://about.gitlab.com/images/blogimages/gitlab-visual-studio-extension-suggestions.gif)\n\n## Iterating on AI/ML features\nWhile this brings us one step closer to reaching developers working in Visual Studio, we still have our eyes on the [JetBrains IDEs](https://gitlab.com/gitlab-org/editor-extensions/gitlab-jetbrains-plugin) as well as a native integration for [Neovim](https://gitlab.com/gitlab-org/editor-extensions/gitlab.vim). You can track these projects and stay tuned for future announcements regarding their availability.\n\nWe're also working on a [GitLab Language Server for code suggestions](https://gitlab.com/gitlab-org/editor-extensions/gitlab-language-server-for-code-suggestions). This allows us to not only standardize and iterate faster on our IDE extensions, but for users of IDEs and Editors to use GitLab Duo code suggestions even if we're not officially providing an extension. 
We look forward to providing more documentation and working with the community on this project in the future.\n\nThese efforts are just the start of how we're bringing GitLab Duo capabilities throughout the software development lifecycle to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI-assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[716,738,717,9],{"slug":2147,"featured":6,"template":693},"gitlab-visual-studio-extension","content:en-us:blog:gitlab-visual-studio-extension.yml","Gitlab Visual Studio Extension","en-us/blog/gitlab-visual-studio-extension.yml","en-us/blog/gitlab-visual-studio-extension",{"_path":2153,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2154,"content":2160,"config":2165,"_id":2167,"_type":14,"title":2168,"_source":16,"_file":2169,"_stem":2170,"_extension":19},"/en-us/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code",{"title":2155,"description":2156,"ogTitle":2155,"ogDescription":2156,"noIndex":6,"ogImage":2157,"ogUrl":2158,"ogSiteName":706,"ogType":707,"canonicalUrls":2158,"schema":2159},"Developing GitLab Duo: Secure and thoroughly test AI-generated code","Learn step-by-step how to enhance AI-generated code reliability and security using GitLab Duo and GitLab Pages (includes code samples and prompts).","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097183/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750097183481.png","https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: Secure and thoroughly test AI-generated code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-05-30\",\n      }",{"title":2155,"description":2156,"authors":2161,"heroImage":2157,"date":2162,"body":2163,"category":10,"tags":2164},[1220],"2024-05-30","___Generative AI marks a monumental shift in the software development industry, making it easier to develop, secure, and operate software. 
Our new blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers.___\n\nAs AI becomes increasingly integral to software development, ensuring the security and thorough testing of AI-generated code is paramount. This article is a step-by-step guide to combining [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI features powering the DevSecOps workflow, and [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) to secure and thoroughly test AI-generated code. You'll discover how to mitigate common risks, including how to automatically generate tests, test code, and deploy test reports – all to enhance the reliability of your AI-generated code.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n## Challenges in AI-generated code\n\nAI-generated code often faces issues such as:\n\n- Mismatched algorithms: Incorrect or suboptimal algorithms can be generated.\n- Dependency problems: AI may include dependencies that are outdated or incompatible.\n- Security vulnerabilities: AI might generate code with potential security flaws.\n\nAI-generated code often faces issues such as mismatched algorithms, dependency problems, and security vulnerabilities. A [recent study published by the Association of Computing Machinery](https://dl.acm.org/doi/pdf/10.1145/3613904.3642596) on ChatGPT’s responses to programming questions found that 52% of the answers contained incorrect information, and 77% were overly verbose. Despite these shortcomings, users preferred ChatGPT’s comprehensive and well-articulated answers 35% of the time, even overlooking misinformation 39% of the time. 
Addressing these challenges requires the use of advanced tools and frameworks.\n\n## GitLab’s approach to AI security and testing\n\nGitLab has a comprehensive content strategy focused on integrating security measures within the development workflow. By leveraging GitLab Duo for AI-powered code generation and GitLab Pages for embedding test reports, developers can ensure their AI-generated code is both secure and reliable.\n\nHere is a step-by-step guide to pair GitLab Duo and GitLab Pages to ensure secure and thoroughly tested AI-generated code by implementing a [Flask web server](https://flask.palletsprojects.com/en/3.0.x/).\n\n### 1. Create a new project on GitLab.com\n\n- Go to [GitLab.com](http://GitLab.com).\n- Click on the \"New project\" button.\n- Choose \"Create blank project\".\n- Enter a project name (e.g., AI_Code_Security).\n- Set the visibility level (Public, Internal, or Private).\n- Click \"Create project\".\n\n### 2. Enable GitLab Duo Code Suggestions\n\n- Navigate to your project.\n- Click on the \"Web IDE\" button to open the Web IDE.\n- Ensure that GitLab Duo features like Code Suggestions and Duo Chat are enabled. \n- Start coding in the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/). As you type, GitLab Duo will provide code suggestions to help you write code more efficiently.\n\n### 3. Create a Flask web server\n\nYou can create a Flask web server using the comment (highlighted in green) in the screenshot below.\n\n![DGD testing - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097192/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097192520.png)\n\n### 4. Generate tests with GitLab Duo\n\nUnit tests are essential for validating the functionality of the generated code. Use GitLab Duo’s `/tests` command to [generate testing suggestions directly in the Web IDE](https://docs.gitlab.com/ee/user/gitlab_duo_chat_examples.html#write-tests-in-the-ide). 
This command can be tailored with additional instructions to focus on specific aspects such as performance, regression, or using particular frameworks.\n\n#### Example usage in Web IDE:\n\n- Select the code for which you want to generate tests.\n- Use the command `/tests` followed by additional instructions if needed.\n\n![DGD testing - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097192/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097192521.png)\n\n### 5. Identify issues with AI-generated code using GitLab Duo Chat\n\nUse GitLab Duo Chat to review and refine AI-generated code. For instance, let's check our Flask web server code for security vulnerabilities:\n\n```unset\nPrompt: Review this code for potential security vulnerabilities and dependency issues.\n\n```\n\n![DGD testing - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097192/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097192523.png)\n\nGitLab Duo Chat can help identify the vulnerabilities in the above code.\n\n### 6. Generate test reports\nAfter running your tests, generate a test report that will be deployed using GitLab Pages.\n\n```unset\n\nPrompt: Write me a python script to generate a test report that will be deployed using \nGitLab Pages.\n\n```\n\n![DGD testing - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097192/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097192525.png)\n\nWhat's happening here?\n\n- The script ensures the test_reports directory exists.\n- It runs the `test_server.py` file using `subprocess.run()`, capturing the output.\n- The raw output is saved to `test_reports/test_output.txt`.\n- An HTML report is generated, embedding the test output within `\u003Cpre>` tags for readability, and saved as `test_reports/index.html`.\n\n### 7. 
Deploy the test report with GitLab Pages\n\nUse [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) to host and display the test report. Here’s the configuration for our `.gitlab-ci.yml` file to deploy the test report.\n\n```python\n\nstages:\n  - test\n  - deploy\ntest_job:\n  stage: test\n  script:\n    - python generate_test_report.py\n  artifacts:\n    paths:\n      - test_reports/\npages:\n  stage: deploy\n  script:\n    - mv test_reports public\n  artifacts:\n    paths:\n      - public\n\n ```\n\nWith this setup, the `test_job` stage runs the Python script to generate the test report. The `pages` stage moves the `test_reports` directory to `public`, which GitLab Pages uses to serve the content.\n\n### 8. Embedding test reports in MR widgets\n\nEmbedding the [test reports in MR widgets](https://docs.gitlab.com/ee/ci/testing/unit_test_reports.html) provides immediate visibility into the testing outcomes, ensuring transparency and reliability. This can be done by including the test report as an artifact in your CI/CD pipeline configuration:\n\n```python\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild_job:\n  stage: build\n  script:\n    - echo \"Building the project...\"\n    - # Your build commands here\n\ntest_job:\n  stage: test\n  script:\n    - mkdir -p test-reports\n    - python test_server.py > test-reports/results.xml\n  artifacts:\n    when: always\n    reports:\n      junit: test-reports/results.xml\n    paths:\n      - test-reports/results.xml\n\npages:\n  stage: deploy\n  script:\n    - mkdir .public\n    - mv test-reports .public/\n  artifacts:\n    paths:\n      - .public\n\n```\nBy including the test report as an artifact and specifying it in the reports section, GitLab will automatically display the test results in the MR widgets, providing immediate visibility into the testing outcomes and enhancing transparency and reliability.\n\n### Case study: AI reliability with security policies and scanners\n\nImagine a scenario where an 
AI-generated code snippet introduces a dependency that has known vulnerabilities. By using GitLab Duo and its security policies, this dependency would be flagged during the code generation process. Let’s consider an example where a snippet was generated by AI:\n\n```python\n\nimport os\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\n@app.route('/search')\ndef search():\n    query = request.args.get('query')\n    execute_os_command(query)\n    return 'You searched for: ' + query\n\ndef execute_os_command(command):\n    os.system(command)\n\nif __name__ == '__main__':\n    app.run()\n\n```\n\nIn this example, the search endpoint is vulnerable to OS command injection. By leveraging GitLab's Static Application Security Testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)) component, this vulnerability would be detected during the CI/CD pipeline.\n\n#### Integrate SAST scanning to detect vulnerabilities\n\nGitLab SAST automatically analyzes your code for security vulnerabilities. Here’s how it can be integrated into your `.gitlab-ci.yml` file to scan for issues:\n\n```python\n\nstages:\n  - build\n  - test\n  - sast\n  - deploy\n\nbuild_job:\n  stage: build\n  script:\n    - echo \"Building the project...\"\n    - # Your build commands here\n\ntest_job:\n  stage: test\n  script:\n    - python test_server.py > test-reports/results.xml\n  artifacts:\n    when: always\n    reports:\n      junit: test-reports/results.xml\n    paths:\n      - test-reports/results.xml\n\nsast_job:\n  stage: sast\n  script:\n    - echo \"Running SAST...\"\n  artifacts:\n    reports:\n      sast: gl-sast-report.json\n  only:\n    - branches\n\npages:\n  stage: deploy\n  script:\n    - mv test-reports public\n  artifacts:\n    paths:\n      - public\n\n```\n\nIn this configuration, the `sast_job` stage runs SAST to detect vulnerabilities in the code, producing a report (`gl-sast-report.json`) that will be included in the pipeline artifacts! 
By integrating security policies and robust testing frameworks, GitLab Duo helps customers ensure that their AI-generated code is both efficient and secure.\n\n## Get started today\nThe integration of AI in software development brings significant benefits but also introduces new challenges. By using tools like GitLab Duo and GitLab Pages, developers can ensure that their AI-generated code is secure, well-tested, and reliable. Explore these tools today and join the conversation on enhancing AI security and testing!\n\n> [Start a trial of GitLab Ultimate](https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/blog&glm_content=default-saas-trial) today to access GitLab Duo and GitLab Pages.\n\n## Read more of the \"Developing GitLab Duo\" series\n\n- [Developing GitLab Duo: How we validate and test AI models at scale](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale/)\n- [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n- [Developing GitLab Duo: How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features/)\n- [Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/)\n",[9,716,737,691],{"slug":2166,"featured":6,"template":693},"how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code","content:en-us:blog:how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code.yml","How Gitlab Duo Helps Secure And Thoroughly Test Ai Generated 
Code","en-us/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code.yml","en-us/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code",{"_path":2172,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2173,"content":2179,"config":2185,"_id":2187,"_type":14,"title":2188,"_source":16,"_file":2189,"_stem":2190,"_extension":19},"/en-us/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers",{"title":2174,"description":2175,"ogTitle":2174,"ogDescription":2175,"noIndex":6,"ogImage":2176,"ogUrl":2177,"ogSiteName":706,"ogType":707,"canonicalUrls":2177,"schema":2178},"How GitLab uses prompt guardrails to help protect customers","Learn what prompt guardrails are, how they help mitigate security risks, and what unique considerations GitLab has taken into account when implementing them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663918/Blog/Hero%20Images/aipower.jpg","https://about.gitlab.com/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab uses prompt guardrails to help protect customers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"},{\"@type\":\"Person\",\"name\":\"Roger Woo\"}],\n        \"datePublished\": \"2025-01-30\",\n      }",{"title":2174,"description":2175,"authors":2180,"heroImage":2176,"date":2182,"body":2183,"category":10,"tags":2184},[1220,2181],"Roger Woo","2025-01-30","Imagine introducing a powerful new AI tool that boosts your team's productivity — accelerating code development, resolving issues faster, and streamlining workflows. The excitement is palpable, but questions about security and compliance quickly arise. How do you manage the risk of AI inadvertently exposing sensitive data or responding to malicious prompts? 
This is where prompt guardrails play a crucial role.\n\nPrompt guardrails are structured safeguards – combining instructions, filters, and context boundaries – designed to guide AI models toward secure and reliable responses. Think of them as safety rails on a bridge, working to keep data and interactions on the correct path while supporting your organization's security protocols. In this article, we'll explore how GitLab implements these guardrails, the risks they address, and their importance for security-conscious enterprises and compliance-focused teams.\n\n## Why prompt guardrails matter\n\nAI models have transformed how organizations work, offering powerful tools to enhance productivity and innovation. However, this power comes with inherent risks. Without safeguards, AI systems may unintentionally disclose sensitive information, such as personally identifiable information (PII) or proprietary business data, or potentially act on malicious instructions. Prompt guardrails address these challenges by creating boundaries for AI models to access and process approved content, contributing to reduced risk of unintended data exposure or manipulation.\n\nFor businesses operating under strict regulations like GDPR, prompt guardrails serve as essential protection mechanisms. More importantly, they build trust among decision-makers, end users, and customers, demonstrating [GitLab's commitment to secure and responsible AI usage](https://about.gitlab.com/blog/introducing-the-gitlab-ai-transparency-center/). With prompt guardrails in place, teams can embrace AI's potential while maintaining focus on protecting their critical assets.\n\n## GitLab’s approach to prompt guardrails\n\nAt GitLab, we're [building AI features](https://about.gitlab.com/blog/categories/ai-ml/) with security, transparency, and accountability in mind because we understand these elements are critical for our enterprise customers and their auditors.  
\n\nHere’s how we’re putting that into practice.\n\n### Structured prompts and context boundaries\n\nOur system utilizes tags – like `\u003Cselected_code>` or `\u003Clog>` – to define boundaries for AI model interactions. When users ask GitLab Duo to troubleshoot a job failure, relevant logs are encapsulated in `\u003Clog>` tags. This structure guides the model to focus on specific data while working to prevent the influence from unauthorized or out-of-scope information.\n\n### Filtering and scanning tools\n\nWe employ tools like Gitleaks to scan inputs for secrets (API keys, passwords, etc.) before transmission to the AI. This filtering process helps minimize the potential for exposing confidential information or sending credentials into a model's prompt.\n\n### Role-based insights\n\nOur guardrails support focused AI discussions while contributing to customers' compliance efforts through controlled data handling and clear documentation. Organizations can adopt AI solutions designed to align with enterprise policies and risk tolerances.\n\n## Different approaches to prompt guardrails\n\nPrompt guardrails aren't one-size-fits-all solutions. Different strategies offer unique advantages, with effectiveness varying by use case and organizational requirements. GitLab combines multiple approaches to create a comprehensive system designed to balance security with usability.\n\n### System-level filters: The first line of defense\n\nSystem-level filters serve as a proactive barrier, scanning prompts for restricted keywords, patterns, or potentially harmful content. These filters work to identify and block potential risks — such as profanity, malicious commands, or unauthorized requests — before they reach the AI model.\n\nThis approach requires continuous updates to maintain effectiveness. As threats evolve, maintaining current libraries of restricted keywords and patterns becomes crucial. 
GitLab integrates these filters into its workflows to address potential risks at the earliest stage.\n\n### Model instruction tuning: Teaching the AI to stay on track\n\nInstruction tuning involves configuring AI behavior to align with specific guidelines. Our AI models are designed to reduce potentially problematic behaviors like role play, impersonation, or generating inappropriate content.\n\nThis foundation supports responses that remain informative, professional, and focused. When summarizing discussions or analyzing code, the AI maintains focus on the provided context, ideally mitigating potential deviation into unrelated topics.\n\n### Sidecar or gateway solutions: Adding a layer of protection\n\nSidecar or gateway solutions function as security checkpoints between users and AI models, processing both inputs and outputs. Like a customs officer reviewing luggage, these components help ensure only appropriate content passes through.\n\nThis approach proves particularly valuable in environments requiring strict information control, such as regulated industries or compliance-driven workflows.\n\n### Why GitLab combines these approaches\n\nNo single strategy addresses all potential risks. GitLab's hybrid approach combines system-level filters, instruction tuning, and sidecar solutions to create a robust security framework while maintaining usability.\n\nSystem-level filters provide initial screening, while instruction tuning aligns AI behavior with security standards. Sidecar solutions offer additional oversight, supporting transparency and control over data flow.\n\nThis combination creates a framework designed to support confident AI adoption while aiming to protect sensitive data and maintain compliance requirements.\n\n## Lessons learned\n\nWhile prompt guardrails help to significantly reduce risks, no system is infallible. 
Here are some lessons we have learned along the way:\n\n* Overly restrictive rules might hamper legitimate usage, frustrate developers, or slow down workflows. Striking the right balance between protecting data and providing real value is key.\n* Threat landscapes change, as do the ways people use AI. Regular updates to guardrails support alignment with current requirements and potential threats\n* At GitLab, we understand that no system can promise absolute security. Instead of making guarantees, we emphasize how our guardrails are designed to reduce risks and strengthen your defenses. This transparent approach builds trust by acknowledging that security is an ongoing process — one that we continuously refine to help support your organization’s evolving needs.\n* We gather feedback from actual user scenarios to iterate on our guardrails. Real-world insights help us refine instructions, tighten filters, and improve scanning tools over time.\n\n## Summary\n\nPrompt guardrails go beyond being a technical solution — they represent GitLab’s commitment to prioritizing AI security for our customers. By helping to reduce exposure, block harmful inputs, and ensure clear traceability of AI interactions, these guardrails aim to provide your teams with the confidence to innovate securely.\n\nWith [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our structured prompts, scanning tools, and carefully tuned instructions work together to help keep AI capabilities aligned with compliance standards and best practices. 
Whether you’re a developer, auditor, or decision-maker, these safeguards aim to enable you to embrace AI confidently while staying true to your organization’s security and compliance goals.\n\n> [Learn more about GitLab Duo and get started with a free trial today!](https://about.gitlab.com/gitlab-duo/)",[9,1082],{"slug":2186,"featured":91,"template":693},"how-gitlab-uses-prompt-guardrails-to-help-protect-customers","content:en-us:blog:how-gitlab-uses-prompt-guardrails-to-help-protect-customers.yml","How Gitlab Uses Prompt Guardrails To Help Protect Customers","en-us/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers.yml","en-us/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers",{"_path":2192,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2193,"content":2199,"config":2206,"_id":2208,"_type":14,"title":2209,"_source":16,"_file":2210,"_stem":2211,"_extension":19},"/en-us/blog/how-is-ai-ml-changing-devops",{"title":2194,"description":2195,"ogTitle":2194,"ogDescription":2195,"noIndex":6,"ogImage":2196,"ogUrl":2197,"ogSiteName":706,"ogType":707,"canonicalUrls":2197,"schema":2198},"How is AI/ML changing DevOps?","Can DevOps help AI/ML find maturity? 
Here are questions to consider.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667540/Blog/Hero%20Images/devops-team-structure.jpg","https://about.gitlab.com/blog/how-is-ai-ml-changing-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How is AI/ML changing DevOps?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-11-16\",\n      }",{"title":2194,"description":2195,"authors":2200,"heroImage":2196,"date":2202,"body":2203,"category":2204,"tags":2205},[2201],"Brendan O'Leary","2022-11-16","\n\nThe last few years have seen an explosion in artificial intelligence, [machine learning](/blog/top-10-ways-machine-learning-may-help-devops/), and other types of projects. Companies like Hugging Face and applications like [DALL-E 2](https://openai.com/dall-e-2/) have brought to the mainstream what the power of AI/ML can bring to the next generation of computing and software. As every company has become a software company over the last few decades, the ability to innovate and leverage the ever-growing amount of data that organizations have access to have become where enterprises turn to compete.\n\nHowever, a lot of AI/ML projects get stalled from several challenges that may seem familiar to software professionals who have been around since [the early days of DevOps](/blog/the-journey-to-a-devops-platform/).  Adoption and optimization of artificial intelligence and machine learning have been hampered by a lack of repeatability for experiments, a disparity of tools and information silos, and a lack of team collaboration.\n\n## A new model for data modeling\n\nOne of the first ways to look at this problem is to make sure that the mental model is in place to allow the team to reason about both the strategic vision for AI/ML at your organization. 
And once that has been established, also think about the tactical “jobs to be done” to lay the foundation for that work.\n\nStrategically, there are many teams that have to come together for a successful AI/ML program. First, the data has to both be acquired and transformed into a usable set of clean data. Often referred to as [“DataOps”](/blog/introducing-modelops-to-solve-data-science-challenges/) this involves the typical “ETL” or extract, load, transform processes data has to go through to be useful for teams. From there, you have to productionize the data workloads through MLOps - the experimentation, training, testing, and deployment of meaningful models based on the extracted and transformed data.\n\nAnd once those two steps are complete, you can finally understand how to make production use cases for your data. You can use AI Assisted features to focus on improving user experiences, for financial forecasting, or for general trends and analysis of various parts of your business. Given the complexity of this value chain, the various teams and skills involved, and the current mishmash of tooling, there is a lot that teams can learn from the history of DevOps as they tackle these problems.\n\n## DevOps and AI/ML\n\nMuch like the various stages of obtaining and applying AI/ML for business uses, software development consists of many varied steps with different teams and skills sets to achieve the business goals outlined. That is why years ago, folks came up with this [concept of “DevOps”](/topics/devops/)– combining teams and having them work together in a cycle of continuous improvement towards the same goals – to combat silos and inefficiencies. \n\nData science teams are using specialized tools that don't integrate with the existing software development lifecycle tools they already use. This causes teams to work in silos, creating handoff friction and resulting in finger-pointing and lack of predictability. 
Businesses and software teams often fail to take advantage of data, and it takes months for models to get into production by which time they may be out of date or behind competitors.  Security and data ethics are frequently treated as an afterthought. This creates risk for organizations and slows innovation. \n\n## Learning from the past\n\nIf the past decades of “DevOps” evolution have taught us anything, it's that breaking down the silos between teams through the tools and processes they are using pays off dividends for business. As your team begins their [AI/ML journey](/blog/why-ai-in-devops-is-here-to-stay/) — or if you've found yourself stalling in AI/ML initiatives already — you should consider how you can consolidate teams together, ensure they are working efficiently together, and able to collaborate without boundaries.\n\nAn explosion of tools in the space is tantalizing with the promise of “getting started” quickly. But it may not set your organization up for long-term success in these areas if those tools have the effect of separating parts of your organization from one another. Creating and sustaining an AI/ML program will require intentionality behind both the processes and tools your team is using. 
That allows your teams to extract, transform and load data efficiently, tune, test and deploy models effectively, and leverage AI/ML to drive value for your stakeholders for the long haul.\n","insights",[917,233,1083,9],{"slug":2207,"featured":6,"template":693},"how-is-ai-ml-changing-devops","content:en-us:blog:how-is-ai-ml-changing-devops.yml","How Is Ai Ml Changing Devops","en-us/blog/how-is-ai-ml-changing-devops.yml","en-us/blog/how-is-ai-ml-changing-devops",{"_path":2213,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2214,"content":2220,"config":2228,"_id":2230,"_type":14,"title":2231,"_source":16,"_file":2232,"_stem":2233,"_extension":19},"/en-us/blog/how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design",{"title":2215,"description":2216,"ogTitle":2215,"ogDescription":2216,"noIndex":6,"ogImage":2217,"ogUrl":2218,"ogSiteName":706,"ogType":707,"canonicalUrls":2218,"schema":2219},"Fighting the opioid epidemic with ML & human-centered design","GitLab users Jack Cackler and Frank Lee explain how they use predictive analytics to empower community stakeholders, like first responders and policy makers, to save lives.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671593/Blog/Hero%20Images/telesphora-team.jpg","https://about.gitlab.com/blog/how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Telesphora is tackling the opioid epidemic with machine learning and human-centered design\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erica Lindberg\"}],\n        \"datePublished\": \"2018-09-05\",\n      }",{"title":2221,"description":2216,"authors":2222,"heroImage":2217,"date":2224,"body":2225,"category":2204,"tags":2226},"How Telesphora is tackling the opioid epidemic with machine learning and human-centered design",[2223],"Erica 
Lindberg","2018-09-05","\n\nOn average, [115 Americans die every day](https://www.cdc.gov/drugoverdose/epidemic/index.html) from an opioid overdose. The team at [Telesphora](https://telesphora.com/) is on a mission to help health care professionals and local communities change that.\n\nIn 2017, the United States Department of Health and Human Services (HHS) declared the current opioid crisis a public health emergency as the number of [deaths involving opioids](https://www.drugabuse.gov/related-topics/trends-statistics/overdose-death-rates) in the United States skyrocketed from approximately 10,000 in 2002 to an estimated 49,000 in 2017.\nIn response to the crisis, the HHS released a [five-point strategy](https://www.hhs.gov/opioids/about-the-epidemic/hhs-response/index.html) for fighting the opioid epidemic. Among the key priorities you’d expect to see from a health crisis report (e.g. better prevention, treatment, and recovery services; better pain management) is **better data**, and they’ve turned to computer and data scientists for help.\n\n![US map of opioid epidemic](https://about.gitlab.com/images/blogimages/telesphora/us-map-crisis.jpg){: .medium.center}\n*\u003Csmall>In 2016, the number of overdose deaths involving opioids was 5 times higher than in 1999.\u003C/small>*\n\n## Designing for people\n\nJack Cackler is a machine learning specialist. Frank Lee is a pain management specialist. Under typical circumstances, these two may have never met. 
But when the HHS decided to hold an unprecedented [national opioid crisis code-a-thon](https://www.hhs.gov/challenges/code-a-thon/index.html), they didn’t just enlist developers – they brought in stakeholders from every side of the issue to develop data-driven solutions to combat the opioid epidemic across three tracks: treatment, usage, and prevention.\n\n[Origami Innovations](https://origamiinnovations.com/), a design, innovation, and solution lab powered by a Yale University students, was invited to the code-a-thon, bringing Cackler, Lee, and co-founders Matthew Erlendson, fourth-year medical student at Yale University and founder of Origami Innovations, and Dara Rouholiman, a digital health, data, and machine learning consultant together for the time. After winning the Treatment Track and receiving a $10,000 prize, they formed Telesphora, a human-centered data science platform.\n\n“One of the things that we were involved with was coming up with the core themes for the hackathon,” said Frank Lee, co-founder of Telesphora. “One of the ways that we do that is by human-centered design thinking.”\n\nHuman-centered design is an approach to design that considers the human perspective in every step of the problem-solving process. As Jack Cackler, co-founder at Telesphora, explains, “Sometimes, especially for those with a technical background, there’s a tendency to just focus on a technical solution. We really tried to get the story behind how this [opioid crisis] really impacted people.”\n\n> \"There’s a tendency to just focus on a technical solution. We really tried to get the story behind how this [opioid crisis] really impacted people.”\n\nCackler and team knew they wanted to design a human-centered solution. 
Discovering that the stigma of chronic opioid use was preventing treatment, they started asking questions:\n\n- *How might we treat this like a disease to reduce stigma, taking an empathetic approach similar to outbreaks of the flu or STDs?*\n- *How might we better predict community outbreaks?*\n- *How might we contain high mortality outbreaks, such as bad batches of drugs, to save lives in real time?*\n\n“We involved all the stakeholders in the crisis, which includes not only the providers, the scientists, and the administrators of the local and the state regions, but also the patients and families of patients who are affected by the overdose,” said Lee. “After doing a lot of brainstorming with these participants, we knew there needed to be better communication between first responders. We aimed our solution toward first responders and how they can help each other better allocate resources to help with the overdoses.”\n\n## Empathy over stigma\n\nOn June 23, 2016 in New Haven, Connecticut, where many on Cackler and Lee’s code-a-thon team called home, 12 patients, found within a one-block radius, were taken to Yale New Haven Hospital for opioid overdose. Three lost their lives due to a shortage of the drug Narcan (naloxone), a drug that can treat an opioid overdose to prevent death; the shelf life is short and the cost is high.\n\nPart of the problem, according to Lee and Cackler, is that there’s a common assumption that there’s a uniform distribution of overdoses, therefore, you can accommodate the demand. However, data analysis and conversations with first responders show that overdoses happen in spikes, like the event in New Haven.\n\n“There will be a new distribution channel of some opioid in some city. And then all of a sudden, you'll have a dozen, two dozen overdoses in a weekend, and there's just no way that the ambulances in the city can service that demand,” said Cackler. 
If the outbreak in New Haven could have been predicted, health agencies could have prepared and saved lives.\n\n![telesphora interface](https://about.gitlab.com/images/blogimages/telesphora/hhs1.png){: .medium.center}\n*\u003Csmall>Telesphora is a platform that uses real-time, open-access data and machine learning to predict where and when increases in opioid overdose and mortality will occur.\u003C/small>*\n\nThe solution Cackler, Lee, and the team came up with, now Telesphora, aimed to do just that. Using real-time data and future-trend data, they built a platform that empowers communities to predict outbreaks, increases access to treatment and resources, and reduces the stigma of opioid use.\n\n## Predictive analytics and user-friendly tools save lives\n\nKnowing that if an overdose outbreak is predicted before it happens, life-saving medicine can be allocated to the soon-to-be affected area to save lives, the Telesphora team used predictive analytics and user-friendly design to build a projection model and visualize the data.\n\n> \"If the outbreak in New Haven could have been predicted, health agencies could have prepared and saved lives.\"\n\nStarting with historical overdose data and network analysis of supply movements and overdoses, they created a spatiotemporal Poisson process to project future opioid overdose trends at any given space and time. The Poisson process takes real-time data and uses the geographic information, temporal information, and type of drug to predict the movement of opioids, alerting local responders and authorities of a potential overdose outbreak before it happens, bringing response time and mortality rate down.\n\n“The first alerts in this model come from neighboring cities in a flurry of mortality rate. Our tool with a geospatial analysis can predict the movement of spikes. 
When you see a spike in fentanyl in New Haven, CT, 4.8 days later you’ll see a spike happen in Fairfield,” Cackler explains.\n\n![machine learning explanation](https://about.gitlab.com/images/blogimages/telesphora/machine-learning.jpg){: .medium.center}\n*\u003Csmall>The machine learning model predicts the movement of outbreaks based on surrounding counties.\u003C/small>*\n\nWhen an outbreak is detected, it appears as a spike on the graph and the model can correlate that spike to different regions, alerting communities to how many days until that outbreak affects their area. The data visualization makes it easy for end users, like first responders, to digest the numbers and trends, showing the actual and predicted data across different regions, and the ability to filter by different drugs.\n\n“If we had this model a year before, events like what happened in New Haven could have been predicted. I think that’s really impactful and you can see in a tangible way how this is actionable,” said Cackler.\n\n*Are you using machine learning or human-centered design to build actionable solutions for the future? We want to hear from you! 
Email content@gitlab.com.*\n\nAll images courtesy of Telesphora\n{: .note}\n",[2227,9],"user stories",{"slug":2229,"featured":6,"template":693},"how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design","content:en-us:blog:how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design.yml","How Telesphora Is Tackling The Opioid Crisis Machine Learning Human Centered Design","en-us/blog/how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design.yml","en-us/blog/how-telesphora-is-tackling-the-opioid-crisis-machine-learning-human-centered-design",{"_path":2235,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2236,"content":2242,"config":2247,"_id":2249,"_type":14,"title":2250,"_source":16,"_file":2251,"_stem":2252,"_extension":19},"/en-us/blog/how-to-leverage-gitlab-duo-for-enhanced-security-reporting",{"title":2237,"description":2238,"ogTitle":2237,"ogDescription":2238,"noIndex":6,"ogImage":2239,"ogUrl":2240,"ogSiteName":706,"ogType":707,"canonicalUrls":2240,"schema":2241},"How to leverage GitLab Duo for enhanced security reporting","Learn how GitLab Duo enables efficient, real-world security reporting for development, operations, and security teams.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098339/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%285%29_1iy516k40hwBDChKcUJ2zb_1750098339103.png","https://about.gitlab.com/blog/how-to-leverage-gitlab-duo-for-enhanced-security-reporting","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to leverage GitLab Duo for enhanced security reporting\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valentine Mairet\"},{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-12-03\",\n      
}",{"title":2237,"description":2238,"authors":2243,"heroImage":2239,"date":1951,"body":2245,"category":10,"tags":2246},[2244,1220],"Valentine Mairet","Good security reporting is crucial to maintain a good security posture because it provides detailed insights into incidents. With this information, organizations can better understand vulnerabilities, improve defenses, and prevent similar threats in the future. At GitLab, the [Security division](https://handbook.gitlab.com/handbook/security/#division-structure) has created use cases for GitLab Duo to improve reporting capabilities and enhance operational efficiency. \n\n## GitLab Duo’s security capabilities\n\nThe GitLab Security division uses GitLab’s built-in [incidents](https://docs.gitlab.com/ee/operations/incident_management/incidents.html) to manage and report on security incidents. Incidents are handled, documented, and resolved in GitLab, enabling the use of AI-driven [GitLab Duo](https://about.gitlab.com/gitlab-duo/) as an assistant when performing security operations like incident response. \n\nParticularly in incident analysis and reporting, GitLab Duo is highly efficient and accurate at creating proper documentation and is a great “pair programmer” when solving security incidents.\n\n## GitLab Duo features for security reporting\n\nGitLab Duo offers many features that enhance security reporting:\n\n- **Root Cause Analysis:** GitLab Duo can explain vulnerabilities and understand the context of an incident issue, making it an excellent assistant for performing root cause analyses of security incidents.\n- **Vulnerability Explanation:** Provides detailed insights into identified vulnerabilities, including potential exploitation methods and remediation steps. 
This feature aids developers and security analysts in understanding and addressing security issues effectively.\n- **Vulnerability Resolution:** Assists in fixing vulnerabilities by generating merge requests that address the identified issues, streamlining the remediation process.\n- **Code Explanation:** Helps users comprehend specific code segments by offering clear explanations, which is particularly useful when dealing with complex or unfamiliar codebases.\n- **Test Generation:** Facilitates early bug detection by generating tests for selected code, ensuring that security vulnerabilities are identified and addressed promptly.\n- **Refactor Code:** Suggests improvements or refactoring for selected code to enhance its quality and maintainability, contributing to a more secure codebase.\n- **Fix Code:** Identifies and rectifies quality issues such as bugs or typos in the selected code, helping maintain a robust and secure codebase.\n\n## Practical use cases\n\nFor the purpose of demonstrating practical use cases, the Security Incident Response Team created a dummy incident with the following limited information:\n\n![Incident report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098346/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098346297.png)\n\nSeveral comments were added as the team would normally proceed:\n\n![Comments added to report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098346/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098346297.png)\n\n### Incident reporting\n\nGitLab Duo is able to comprehensively keep track of all information inside an incident issue, including the issue description, comments, and labels. When handling security incidents, information often is all over the place and can change over time. It can easily get lost or overlooked. GitLab Duo is excellent at finding relevant information again to create accurate incident reports. 
\n\nNavigate to your incident issue and open GitLab Duo Chat. You can engineer your prompt so that GitLab Duo takes your exact reporting requirements into account such as what sections you need and how they should be filled out. Here is an example of the prompt we use at GitLab Security:\n\n> Required sections:\n> - Executive Summary - bottom-line-up-front that is adequate for an audience like senior leadership and CISO\n> - Mitigations & Remediations\n> - Scope & Impact (Environments, customers, team members)\n> - Cause\n> - Detection Capabilities\n>\n> Based on the issue and the required section, write the security incident report. Flag any missing information. \n\nBelow is GitLab Duo’s output that we can use to create our extensive incident report:\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175975/Blog/zxiafech08ck9gqjubaz.png\" alt=\"root cause analysis output\" width=\"400\" height=\"1200\">\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175985/Blog/xjc9coah4oi7rz0daw2b.png\" alt=\"root cause analysis output\" width=\"400\" height=\"1200\">\n\n### Root Cause Analysis\n\nGitLab Duo is able to explain vulnerabilities and can understand the context of an incident issue. These capabilities make GitLab Duo an excellent assistant to perform root cause analyses of security incidents. \n\nAs seen above, GitLab Duo can write its own section on the cause of an incident based entirely on the issue contents. We can dig into this further with the assistance of GitLab Duo to identify several candidates for root cause(s).  \n\nOur prompt is built as such:\n\n> “The root cause of the incident was the exposure of a personal access token in CI job logs. 
The token was leaked in a project that was not included in the current list of projects for token detection, indicating a gap in the security monitoring process.”\n> Name a few potential root causes for a PAT finding itself in CI job logs.\n\nGitLab Duo’s response is the following:\n\n![GitLab Duo Root Cause Analysis response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098346/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098346298.png)\n\nGitLab Duo Root Cause Analysis can help security teams identify avenues worth investigating. It has a pretty good grasp on security concepts and how Root Cause Analysis is led in general, so it can be a great assistant for these situations where the exact root of the problem is unclear. \n\n> Learn more about [GitLab Duo Root Cause Analysis](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/).\n\n### On-demand updates\n\nSecurity incidents move fast and can be confusing, and information is constantly changing and shifting around. Higher severity incidents require a certain degree of thoroughness when it comes to reporting to senior leadership, especially when the audience is at the Director+ level. Engineers who are in the midst of a security incident can find it difficult to condense the information such that it is digestible for executives. \n\nWe saw above that GitLab Duo is capable of delivering a pretty good executive summary. When the incident is ongoing, we need to deliver regular updates to senior leadership on the incident status and next steps. GitLab Duo is a great help for that, as well. If information is scattered across the issue in the form of a description or comments, GitLab Duo can help reassemble this information into the “bottom-line-up-front,” or BLUF summary, we need for executive updates. 
\n\nWe’ve taken the same incident right before token revocation and asked GitLab Duo for a BLUF summary where the audience is the Director of Security Operations. \n\n![Executive Summary - GitLab Duo](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098346/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098346299.png)\n\n## Getting started with GitLab Duo for security\n\nGitLab Security has automated several parts of the reporting process with the help of GitLab Duo. But to get started, all you need is access to GitLab Duo Chat. GitLab Duo Chat can be your well-informed assistant for many security reporting cases and post-mortem analyses.\n\n## What’s next for GitLab Duo?\n\nGitLab is committed to continuously enhancing GitLab Duo’s capabilities. Future developments aim to integrate AI-driven features more deeply into the security workflow, providing proactive detection and resolution of vulnerabilities, streamlined incident management, and comprehensive reporting tools. 
These advancements will further empower security teams to maintain robust security postures and respond effectively to emerging threats.\n\n> [Try GitLab Duo for free](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/)!\n",[9,737,691,717,496],{"slug":2248,"featured":6,"template":693},"how-to-leverage-gitlab-duo-for-enhanced-security-reporting","content:en-us:blog:how-to-leverage-gitlab-duo-for-enhanced-security-reporting.yml","How To Leverage Gitlab Duo For Enhanced Security Reporting","en-us/blog/how-to-leverage-gitlab-duo-for-enhanced-security-reporting.yml","en-us/blog/how-to-leverage-gitlab-duo-for-enhanced-security-reporting",{"_path":2254,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2255,"content":2261,"config":2266,"_id":2268,"_type":14,"title":2269,"_source":16,"_file":2270,"_stem":2271,"_extension":19},"/en-us/blog/how-to-successfully-deliver-your-software-development-roadmap",{"title":2256,"description":2257,"ogTitle":2256,"ogDescription":2257,"noIndex":6,"ogImage":2258,"ogUrl":2259,"ogSiteName":706,"ogType":707,"canonicalUrls":2259,"schema":2260},"How to successfully deliver your software development roadmap","Here are three common blockers and how to overcome them to fully realize the ROI of a DevSecOps platform investment.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669103/Blog/Hero%20Images/AdobeStock_243118595.jpg","https://about.gitlab.com/blog/how-to-successfully-deliver-your-software-development-roadmap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to successfully deliver your software development roadmap\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2024-04-04\",\n      }",{"title":2256,"description":2257,"authors":2262,"heroImage":2258,"date":2263,"body":2264,"category":1201,"tags":2265},[1549],"2024-04-04","2024 is shaping up to 
be the year of DevSecOps, where more organizations realize the full potential of blending development, security, and operations through the adoption of a comprehensive platform. This is when teams will move beyond using just source code management (SCM) and tap into all the [AI-powered features](https://about.gitlab.com/topics/devops/the-role-of-ai-in-devops/) available across the software development lifecycle (SDLC), delivering better, more secure software faster. But first organizations have to knock down the blockers that can get in the way of successful [DevSecOps](https://about.gitlab.com/topics/devsecops/) adoption.\n\nIn talking to customers at organizations of all sizes, I've heard three main blockers:\n* The potential of AI is believable, but right now it seems limited to code creation and that has limited impact as there is more to the SDLC.\n* A platform seems like a great idea, but forcing my development, security, and operations team to give up their preferred tools all at once will undoubtedly cause a revolt. Yet, without everyone on the same platform, the investment is hard to justify.\n* Regulations and compliance makes it difficult to leverage a DevSecOps SaaS solution, and being on a multi-tenant solution is a non-starter for us as we are in a highly regulated industry. However, the overhead of self-hosting a DevSecOps platform is becoming untenable at our scale.\n\nWhile legitimate concerns, these blockers can be eliminated by combining DevSecOps practices and a platform approach. \"Making sure that we spend our money wisely is very, very important. GitLab allowed us to reduce our costs and centralize our work in one place. It’s been money well spent,\" says Andy Chow, Technology Chief of Staff at global fintech company Airwallex.\n\nLet's dig deeper into each blocker and see how it is resolved with a DevSecOps platform.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. 
[Watch today!](https://about.gitlab.com/seventeen/)\n\n### AI is not limited: It is having real impact across the SDLC\n\nWe know that AI is already improving the developer experience but there is so much more that AI can do across the entire SDLC. With AI, organizations can unburden development, security, and operations teams from tedious tasks by taking advantage of the efficiencies that AI provides. For instance, users can access summaries of comments in merge requests, have tests generated, refactor sections of code, and perform other time-saving actions.\n\nThat's why with [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI-powered workflows, we focus on more than just code creation — after all, [code creation only accounts for 25% of a developer's time](https://about.gitlab.com/blog/gitlab-global-devsecops-ai-report/). There is so much more that happens in the SDLC where AI can add efficiency. For instance, development, security, and operations teams that use AI-powered capabilities, such as Vulnerability Remediation and Root Cause Analysis, share that they can find and resolve vulnerabilities earlier and identify CI/CD pipeline failures faster and in a more collaborative manner.\n\n### Forget one-size-fits-all, migrate your way\n\nRealizing the benefit of a DevSecOps platform is not a one-size-fits-all. You can customize your deployment to fit your organization's needs and where you are in your digital transformation journey — choosing one team at a time to adopt the platform or a full cutover. I have advice, though: Commit to using more than just source code management. A DevSecOps platform is a robust solution that includes enterprise agile planning, CI/CD, security and compliance, value stream analytics, and more. 
Also, make sure that as you deploy your platform, your users agree to get familiar with its range of capabilities — and not still maintain [a complex toolchain](https://about.gitlab.com/blog/battling-toolchain-technical-debt/).\n\nThe way to extract the most ROI and satisfaction from your migration is to show users how to get the functionality they had in their other tools from within the DevSecOps platform. To that end, we've increased our resources to support you. From [in-depth tutorials](https://about.gitlab.com/blog/tags/tutorial/) to [clear reference architectures](https://docs.gitlab.com/ee/administration/reference_architectures/), we have a vast library of content (including videos) for you to draw upon to help your users acclimate to and thrive in the DevSecOps environment. \n\nWe've also made it easier to onboard teams, with capabilities like [remote development environments](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/), enabling organizations to reduce adoption friction. In fact, as more teams within your organization adopt GitLab, consider expanding access for other critical functions that contribute to delivering software value such as Finance, Legal, and Marketing teams. The power of a DevSecOps platform is giving everyone visibility into the SDLC which drives better collaboration, improves planning, reduces security risk, improves team velocity, and leads to faster time-to-value. This means your teams are happier and so are the users of the applications you build, secure, and deploy using GitLab.\n\n> Read how the U.S. Navy's Black Pearl [sped up onboarding using GitLab](https://about.gitlab.com/blog/u-s-navy-black-pearl-lessons-in-championing-devsecops/).\n\n**Note:** GitLab doesn't have to be introduced to the organization by the development team. 
For instance, if security teams want vulnerabilities identified and mitigated earlier in the lifecycle or increased compliance via security scanning, they can recommend that developers use the DevSecOps platform. [Read how U.K. retailer Dunelm made this happen](https://about.gitlab.com/customers/dunelm/).\n\n### Multi-tenancy is just one option; single-tenancy can address regulatory requirements\n\nKeeping software up-to-date and secure while maintaining compliance with strict regulations can make self-hosting a challenge. In June 2023, we launched [GitLab Dedicated](https://about.gitlab.com/dedicated/), our single-tenant SaaS solution, into general availability to address the needs of organizations in highly regulated industries like finance and healthcare, and in highly regulated geographies such as the European Union. GitLab Dedicated provides the secure environment organizations need for regulatory compliance, including control over data residency and isolation, while removing the overhead of self-hosting.\n\nDedicated customers are upgraded automatically every month, which means they have all of the benefits without the administration overhead. Furthermore, GitLab Dedicated comes with [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/), enabling organizations to ship secure software faster with built-in compliance visibility and controls as well as advanced security scanning capabilities.\n\n> Learn the [origins of GitLab Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/) and how it has grown into the solution highly regulated organizations need.\n\n### Try GitLab today\n\nAs you proceed with your software development roadmap for 2024 (and beyond), consider what an AI-powered DevSecOps platform could do for your organization. 
Also keep an eye on [our Direction page](https://about.gitlab.com/direction/#fy25-rd-investment-themes) to learn about what’s coming next and [our monthly release posts](https://about.gitlab.com/blog/categories/devsecops-platform/) to learn about the latest and greatest available.\n\n> Start your trial of [GitLab Duo Pro](https://about.gitlab.com/gitlab-duo/#free-trial) or [GitLab Ultimate](https://gitlab.com/-/trials/new) for free today.\n",[9,716,496,691],{"slug":2267,"featured":91,"template":693},"how-to-successfully-deliver-your-software-development-roadmap","content:en-us:blog:how-to-successfully-deliver-your-software-development-roadmap.yml","How To Successfully Deliver Your Software Development Roadmap","en-us/blog/how-to-successfully-deliver-your-software-development-roadmap.yml","en-us/blog/how-to-successfully-deliver-your-software-development-roadmap",{"_path":2273,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2274,"content":2280,"config":2285,"_id":2287,"_type":14,"title":2288,"_source":16,"_file":2289,"_stem":2290,"_extension":19},"/en-us/blog/icymi-key-ai-and-security-insights-from-our-developer-community",{"title":2275,"description":2276,"ogTitle":2275,"ogDescription":2276,"noIndex":6,"ogImage":2277,"ogUrl":2278,"ogSiteName":706,"ogType":707,"canonicalUrls":2278,"schema":2279},"ICYMI: Key AI and security insights from our developer community","Our latest LinkedIn Live highlights the hottest trends in AI, security, DevSecOps, and more. 
Also get a taste of the GitLab community contributions that are making an impact.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098331/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%286%29_55zMmdJIUpfh5qaPW9dtVA_1750098331584.png","https://about.gitlab.com/blog/icymi-key-ai-and-security-insights-from-our-developer-community","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ICYMI: Key AI and security insights from our developer community\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2024-12-05\",\n      }",{"title":2275,"description":2276,"authors":2281,"heroImage":2277,"date":2282,"body":2283,"category":1201,"tags":2284},[711],"2024-12-05","In our [November LinkedIn Live broadcast](https://www.linkedin.com/feed/update/urn:li:activity:7265408726696697857), we brought together field CTOs, developer advocates, and community leaders to discuss industry trends and showcase features making a difference in developer workflows.\n\nHere are 5 key highlights:\n\n### 1. AI adoption trends from the field\nOur field CTOs shared insights on how organizations are embracing AI across their development workflows. 
For instance, Field CTO Cherry Han highlighted how financial organizations are thinking beyond individual developer tools.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388263?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Ai Adoption Trends from the Field\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n\u003Cbr>\u003C/br>\nAndrew Hasker, Field CTO for Asia Pacific and Japan, offered valuable perspective on AI adoption.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388277?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"From Field CTOs\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 2. Security coverage that makes a difference\n\nStaff Developer Advocate Fernando Diaz demonstrated how GitLab's security scanners cover the complete application lifecycle, showing how easy it is to implement [comprehensive security scanning](https://about.gitlab.com/solutions/security-compliance/) with just a few lines of code.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388297?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Security Coverage\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 3. 
AI-powered language migration made simple\nIn an impressive demonstration, Senior Technical Marketing Manager Cesar Saavedra showed how GitLab Duo can assist in migrating applications between programming languages.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1036170482?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"AI-Powered Language Migration Made Simple\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 4. Making DevSecOps work smarter\n\nDeveloper Advocate Abubakar Siddiq Ango showcased how GitLab's triage features can automate routine tasks.\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388290?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Making DevOps Work Smarter\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 5. 
Community contributions making an impact\n\nDirector of Contributor Success Nick Veenhof shared how community contributions are shaping GitLab's development:\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035395211?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Community Contributions Making an Impact\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Watch on-demand\n\n[Watch the complete broadcast recording](https://www.linkedin.com/feed/update/urn:li:activity:7265408726696697857) for step-by-step demonstrations and insights from our experts. Also, be sure to [follow GitLab on LinkedIn](https://www.linkedin.com/company/gitlab-com) to stay up to date on our monthly broadcasts and get insights into our platform, DevSecOps, and software development.\n",[9,691,269,737,715],{"slug":2286,"featured":6,"template":693},"icymi-key-ai-and-security-insights-from-our-developer-community","content:en-us:blog:icymi-key-ai-and-security-insights-from-our-developer-community.yml","Icymi Key Ai And Security Insights From Our Developer Community","en-us/blog/icymi-key-ai-and-security-insights-from-our-developer-community.yml","en-us/blog/icymi-key-ai-and-security-insights-from-our-developer-community",{"_path":2292,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2293,"content":2298,"config":2304,"_id":2306,"_type":14,"title":2307,"_source":16,"_file":2308,"_stem":2309,"_extension":19},"/en-us/blog/improve-ai-security-in-gitlab-with-composite-identities",{"title":2294,"description":2295,"ogTitle":2294,"ogDescription":2295,"noIndex":6,"ogImage":791,"ogUrl":2296,"ogSiteName":706,"ogType":707,"canonicalUrls":2296,"schema":2297},"Improve AI security in GitLab with composite identities","Learn 
how to implement AI features responsibly by applying authentication best practices with service accounts for AI agents in GitLab.","https://about.gitlab.com/blog/improve-ai-security-in-gitlab-with-composite-identities","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Improve AI security in GitLab with composite identities\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Grzegorz Bizon\"}],\n        \"datePublished\": \"2025-01-29\",\n      }",{"title":2294,"description":2295,"authors":2299,"heroImage":791,"date":2301,"body":2302,"category":691,"tags":2303},[2300],"Grzegorz Bizon","2025-01-29","Artificial intelligence (AI) is quickly becoming the backbone of modern software development, fueling developer efficiency and accelerating innovation. With the emergence of AI agents implementing code based on instructions from humans, we are learning that implementing AI-based features has its own unique set of security challenges. **How do we protect access to the resources AI needs, protect confidentiality, and avoid privilege escalation**? Few organizations are ready to answer these questions today. At GitLab, we are. We are introducing a new paradigm for identity management: composite identities.\n\nWhen AI agents are integrated into your DevSecOps workflows, previously simple questions become difficult to answer: Who authored this code? Who is the author of this merge request? Who created this Git commit? We found we had to start asking new questions: Who instructed an AI agent to generate this code? What context did AI need to build this feature? 
What were the resources AI had to access and read to generate the answer?\n\nTo answer these questions, we need to understand some fundamental aspects of AI’s identity:\n\n* Does an AI agent have its own distinct identity?\n* What is the representation of this identity?\n* How do we make it all secure?\n\n### Authentication and AI identity management \n\nWe are at the beginning of a paradigm shift in identity management in the software delivery lifecycle. Before the AI era, identity management was simpler. We had human user-based identities and machine-type identities using separate accounts.\n\nWith the emergence of AI and agentic workflows, the distinction between these two core types of identities has blurred. AI agents are supposed to work in an autonomous way, so it makes sense to think about them as machine-type accounts. On the other hand, AI agents are usually being instructed by human users, and require access to resources the human users have access to in order to complete their tasks. This introduces significant security risks — for example, the AI may provide human users with information they should not have access to. How do we avoid privilege escalation, provide auditability, and protect confidentiality in a world with AI agents?\n\n### The solution: Composite identities\n\nA composite identity is our new identity [principal](https://cloud.google.com/iam/docs/overview#concepts_related_identity), representing an AI agent’s identity that is linked with the identity of a human user who requests actions from the agent. **This enhances our ability to protect resources stored in GitLab**. Whenever an AI agent with a composite identity attempts to access a resource, we will not only authenticate the agent itself, but also link its principal with a human user who is instructing the agent, and will try to authorize both principals before granting access to a resource. Both principals need access; otherwise, the access will be denied. 
If an AI agent by itself can access a project, but a human user who instructed the agent to do so cannot, GitLab will deny the access.\n\nThe inverse is true as well — if a human user can access a confidential issue, but an AI agent can’t, then its service account will not be able to read the issue. We authorize access to every API request and for each resource an agent attempts to access this way. Composite identity without a request-scoped link to a human account will not be authorized to access any resource. For fully autonomous workloads we are also considering adding support for linking composite identities with other principals.\n\n#### Composite identity and service accounts\n\nWe redesigned our authorization framework to support composite identities, allowing multiple principals to be evaluated simultaneously when determining access rights to a resource. We enhanced our security infrastructure by implementing scoped identities across our entire system — from API requests to CI jobs and backend workers. These identities are linked to an AI agent's composite identity account also through OAuth tokens and CI job tokens. This project yielded unexpected security benefits, particularly in GitLab CI, where we upgraded job tokens to signed JSON web tokens (JWTs). Additionally, we contributed code to several open source libraries to add support for scoped identities.\n\n### Composite identity with GitLab Duo with Amazon Q\n\nIn the GitLab 17.8 release, we made composite identity for service accounts support available for customers through our GitLab Duo with Amazon Q integration. 
Amazon Q Developer agent will have composite identity enforced, which will protect your confidential GitLab resources from unauthorized access.\n\n### What’s next?\n\nTo learn more, check out our [composite identity docs](https://docs.gitlab.com/ee/development/ai_features/composite_identity.html).",[9,716,691],{"slug":2305,"featured":6,"template":693},"improve-ai-security-in-gitlab-with-composite-identities","content:en-us:blog:improve-ai-security-in-gitlab-with-composite-identities.yml","Improve Ai Security In Gitlab With Composite Identities","en-us/blog/improve-ai-security-in-gitlab-with-composite-identities.yml","en-us/blog/improve-ai-security-in-gitlab-with-composite-identities",{"_path":2311,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2312,"content":2318,"config":2324,"_id":2326,"_type":14,"title":2327,"_source":16,"_file":2328,"_stem":2329,"_extension":19},"/en-us/blog/inside-look-how-gitlabs-test-platform-team-validates-ai-features",{"title":2313,"description":2314,"ogTitle":2313,"ogDescription":2314,"noIndex":6,"ogImage":2315,"ogUrl":2316,"ogSiteName":706,"ogType":707,"canonicalUrls":2316,"schema":2317},"Inside look: How GitLab's Test Platform team validates AI features","Learn how we continuously analyze AI feature performance, including testing latency worldwide, and get to know our new AI continuous analysis tool.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099033/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2811%29_78Dav6FR9EGjhebHWuBVan_1750099033422.png","https://about.gitlab.com/blog/inside-look-how-gitlabs-test-platform-team-validates-ai-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside look: How GitLab's Test Platform team validates AI features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Lapierre\"},{\"@type\":\"Person\",\"name\":\"Vincy Wilson\"}],\n        \"datePublished\": 
\"2024-06-03\",\n      }",{"title":2313,"description":2314,"authors":2319,"heroImage":2315,"date":1320,"body":2322,"category":10,"tags":2323},[2320,2321],"Mark Lapierre","Vincy Wilson","AI is increasingly becoming a centerpiece of software development - many companies are integrating it throughout their DevSecOps workflows to improve productivity and increase efficiency. Because of this now-critical role, AI features should be tested and analyzed on an ongoing basis. In this article, we take you behind the scenes to learn how [GitLab's Test Platform team](https://handbook.gitlab.com/handbook/engineering/infrastructure/test-platform/) does this for [GitLab Duo](https://about.gitlab.com/gitlab-duo/) features by conducting performance validation, functional readiness, and continuous analysis across GitLab versions. With this three-pronged approach, GitLab aims to ensure that GitLab Duo features are performing optimally for our customers.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n## AI and testing\n\nAI's non-deterministic nature, where the same input can produce different outputs, makes ensuring a great user experience a challenge. So, when we integrated AI deep into the GitLab DevSecOps Platform, we had to adapt to our best practices to address this challenge. \n\nThe [Test Platform team's mission ](https://handbook.gitlab.com/handbook/engineering/infrastructure/test-platform/) is to help enable the successful development and deployment of high-quality software applications with continuous analysis and efficiency to help ensure customer satisfaction. The key to achieving this is by delivering tools that help increase standardization, repeatability, and test consistency. \n\nApplying this to GitLab Duo, our AI suite of tools to power DevSecOps workflows, means being able to continuously analyze its performance and identify opportunities for improvement. 
Our goal is to gain clear, actionable insights that will help us to enhance GitLab Duo's capabilities and, as a result, better meet our customers' needs. \n\n## The need for continuous analysis of AI\n\nTo continuously assess GitLab Duo, we needed a mechanism for analyzing feature performance across releases. Therefore, we created an AI continuous analysis tool to automate the collection and analysis of data to achieve this. \n\n![diagram of how the AI continuous analysis tool works](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099041/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099041503.png)\n\n\u003Ccenter>\u003Ci>How the AI continuous analysis tool works\u003C/i>\u003C/center>\n\n### Building the AI continuous analysis tool\n\nTo gain detailed, user-centric insights, we needed to gather data in the appropriate context – in this case, the integrated development environment (IDE), as it is where most of our users access GitLab Duo. We narrowed this down further by opting for the Visual Studio Code IDE, a popular choice within our community. Once the environment was chosen, we automated entering code prompts and recording the provided suggestions. The interactions with the IDE are handled by the [WebdriverIO VSCode service](https://github.com/webdriverio-community/wdio-vscode-service), and CI operations are handled through [GitLab CI/CD](https://docs.gitlab.com/ee/ci/). This automation significantly scaled up data collection and eliminated repetitive tasks for GitLab team members. To start, we have focused on measuring the performance of GitLab Duo Code Suggestions, but plan to expand to other GitLab AI features in the future.\n\n### Analyzing the data\n\nAt the core of our AI continuous analysis tool is a mechanism for collecting and analyzing code suggestions. This involves automatically entering code prompts, recording the suggestions provided, and logging timestamps of relevant events. 
We measure the time from when the tool provides an input until a suggestion is displayed in the UI. In addition, we record the logs created by the IDE, which report the time it took for each suggestion response to be received. With this data, we can compare the latency of suggestions in terms of how long it takes the backend AI service to send a response to the IDE, and how long it takes for the IDE to display the suggestion for the user. We then can compare latency and other metrics of GitLab Duo features across multiple releases. The GitLab platform has the ability to analyze [code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html) and [application security](https://docs.gitlab.com/ee/user/application_security/), so we leverage these capabilities to enable the AI continuous analysis tool to analyze the quality and security of the suggestions provided by GitLab Duo.\n\n### Improving AI-driven suggestions\n\nOnce the collected data is analyzed, the tool automatically generates a single report summarizing the results. The report includes key statistics (e.g., mean latency and/or latency at various percentiles), descriptions of notable differences or patterns, links to raw data, and CI/CD pipeline logs and artifacts. The tool also records a video of each prompt and suggestion, which allows us to review specific cases where differences are highlighted. This creates an opportunity for the UX researchers and development teams to take action on the insights gained, helping to improve the overall user experience and system performance.\n\nThe tool is at an early stage of development, but it's already helped us to improve the experience for GitLab Duo Code Suggestions users. 
Moving forward, we plan to expand our tool’s capabilities, incorporate more metrics and consume and provide input to our [Centralized Evaluation Framework](https://about.gitlab.com/direction/ai-powered/ai_framework/ai_evaluation/), which validates AI models, to enhance our continuous analysis further.\n\n## Performance validation\n\nAs AI has become integral to GitLab's offerings, optimizing the performance of AI-driven features is essential. Our performance tests aim to evaluate and monitor the performance of our GitLab components, which interact with AI service backends. While we can monitor the performance of these external services as part of our production environment's observability, we cannot control them. Thus, including third-party services in our performance testing would be expensive and yield limited benefits. Although third-party AI providers contribute to overall latency, the latency attributable to GitLab components is still important to check. We aim to detect changes that might lead to performance degradation by monitoring GitLab components. \n\n### Building AI performance validation test environment\n\nIn our AI test environments, the [AI Gateway](https://docs.gitlab.com/ee/architecture/blueprints/ai_gateway/#summary), which is a stand-alone service to give access to AI features to GitLab users, has been configured to return mocked responses, enabling us to test the performance of AI-powered features without interacting with third-party AI service providers. We conduct AI performance tests on [reference architecture environments of various sizes](https://docs.gitlab.com/ee/administration/reference_architectures/). Additionally, we evaluate new tests in their own isolated environment before they're added to the larger environments.\n\n### Testing multi-regional latency\n\nMulti-regional latency tests need to be run from various geolocations to validate that requests are being served from a suitable location close to the source of the request. 
We do this today with the use of the [GitLab Environment Toolkit](https://gitlab.com/gitlab-org/gitlab-environment-toolkit). The toolkit provisions an environment in the identified region to test (note: both the AI Gateway and the provisioned environment are in the same region), then uses the [GitLab Performance Tool](https://gitlab.com/gitlab-org/quality/performance) to run tests to measure time to first byte (TTFB). TTFB is our way of measuring time to the first part of the response being rendered, which contributes to the perceived latency that a customer experiences. To account for this measurement, our tests have a check to help ensure that the [response itself isn't empty](https://gitlab.com/gitlab-org/quality/performance/-/blob/cee8bef023e590e6ca75828e49f5c7c596581e06/k6/tests/experimental/api_v4_code_suggestions_generation_streaming.js#L70). \n\nOur tests are expanding further to continue to measure perceived latency from a customer’s perspective. We have captured a set of baseline response times that indicate how a specific set of regions performed when the test environment was in a known good state. These baselines allow us to compare subsequent environment updates and other regions to this known state to evaluate the impact of changes. These baseline measurements can be updated after major updates to ensure they stay relevant in the future. \n\nNote: As of this article's publication date, we have AI Gateway deployments across the U.S., Europe, and Asia. 
To learn more, visit our [handbook page](https://handbook.gitlab.com/handbook/engineering/development/data-science/ai-powered/ai-framework/#-aigw-region-deployments).\n\n## Functionality\n\nTo help continuously enable customers to confidently leverage AI reliably, we must continuously work to ensure our AI features function as expected.\n\n### Unit and integration tests\n\nFeatures that leverage AI models still require rigorous automated tests, which help engineers develop new features and changes confidently. However, since AI features can involve integrating with third-party AI providers, we must be careful to stub any external API calls to help ensure our tests are fast and reliable.\n\nFor a comprehensive look at testing at GitLab, look at our [testing standards and style guidelines](https://docs.gitlab.com/ee/development/testing_guide/). \n\n### End-to-end tests \n\nEnd-to-end testing is a strategy for checking whether the application works as expected across the entire software stack and architecture. We've implemented it in two ways for GitLab Duo testing: using real AI-generated responses and mock-generated AI responses.\n\n![validating features - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099041/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099041504.png)\n\n\u003Ccenter>\u003Ci>End-to-end test workflow\u003C/i>\u003C/center>\n\n#### Using real AI-generated responses\n\nAlthough costly, end-to-end tests are important to help ensure the entire user experience functions as expected. Since AI models are non-deterministic, end-to-end test assertions for validating real AI-generated responses should be loose enough to help ensure the feature functions without relying on a response that may change. 
This might mean an assertion that checks for some response with no errors or for a response we are certain to receive.\n\nAI-driven functionality is not accessible only from within the GitLab application, so we must also consider user workflows for other applications that leverage these features. For example, to cover the use case of a developer requesting code suggestions in [IntelliJ IDEA](https://www.jetbrains.com/idea/) using the GitLab Duo plugin, we need to drive the IntelliJ application to simulate a user workflow. Similarly, to ensure that the GitLab Duo Chat experience is consistent in VS Code, we must drive the VS Code application and exercise the GitLab Workflow extension. Working to ensure these workflows are covered helps us maintain a consistently great developer experience across all GitLab products. \n\n#### Using mock AI-generated responses\n\nIn addition to end-to-end tests using real AI-generated responses, we run some end-to-end tests against test environments configured to return mock responses. This allows us to verify changes to GitLab code and components that don’t depend on responses generated by an AI model more frequently.\n\n> For a closer look at end-to-end testing, read our [end-to-end testing guide](https://docs.gitlab.com/ee/development/testing_guide/end_to_end/). \n\n### Exploratory testing and dogfooding\n\nAI features are built by humans for humans. At GitLab, exploratory testing and dogfooding greatly benefit us. GitLab team members are passionate about what features get shipped, and insights from internal usage are invaluable in shaping the direction of AI features.\n\n[Exploratory testing](https://about.gitlab.com/topics/devops/devops-test-automation/#test-automation-stages) allows the team to creatively exercise features to help ensure edge case bugs are identified and resolved. Dogfooding encourages team members to use AI features in their daily workflows, which helps us identify realistic issues from realistic users. 
For a comprehensive look at how we dogfood AI features, look at [Developing GitLab Duo: How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features/).\n\n## Get started with GitLab Duo\nHopefully this article gives you insight into how we are validating AI features at GitLab. We have integrated our team's process into our overall development as we iterate on GitLab Duo features. We encourage you to try GitLab Duo in your organization and reap the benefits of AI-powered workflows.\n\n> Start a [free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/#free-trial) today!\n\n_Members of the GitLab Test Platform team contributed to this article._\n",[9,717,496,1082,978,1083],{"slug":2325,"featured":91,"template":693},"inside-look-how-gitlabs-test-platform-team-validates-ai-features","content:en-us:blog:inside-look-how-gitlabs-test-platform-team-validates-ai-features.yml","Inside Look How Gitlabs Test Platform Team Validates Ai Features","en-us/blog/inside-look-how-gitlabs-test-platform-team-validates-ai-features.yml","en-us/blog/inside-look-how-gitlabs-test-platform-team-validates-ai-features",{"_path":2331,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2332,"content":2338,"config":2344,"_id":2346,"_type":14,"title":2347,"_source":16,"_file":2348,"_stem":2349,"_extension":19},"/en-us/blog/introducing-the-gitlab-ai-transparency-center",{"title":2333,"description":2334,"ogTitle":2333,"ogDescription":2334,"noIndex":6,"ogImage":2335,"ogUrl":2336,"ogSiteName":706,"ogType":707,"canonicalUrls":2336,"schema":2337},"Introducing the GitLab AI Transparency Center","This new initiative will help our community understand how we uphold governance and transparency in our AI 
products.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098448/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_4YvWyVQu8Q1g31ZVjlDOkH_1750098447812.png","https://about.gitlab.com/blog/introducing-the-gitlab-ai-transparency-center","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing the GitLab AI Transparency Center\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robin Schulman\"}],\n        \"datePublished\": \"2024-04-11\",\n      }",{"title":2333,"description":2334,"authors":2339,"heroImage":2335,"date":2341,"body":2342,"category":10,"tags":2343},[2340],"Robin Schulman","2024-04-11","GitLab is dedicated to responsibly integrating artificial intelligence (AI) throughout our comprehensive DevSecOps platform. We offer GitLab Duo, a [full suite of AI capabilities](https://about.gitlab.com/gitlab-duo/) across the GitLab platform, so that our customers can ship better, more secure software faster. GitLab Duo follows a privacy- and transparency-first approach to help customers confidently adopt AI while keeping their valuable assets protected.\n\nGenerative AI is moving so quickly and we know it presents a host of novel questions about the privacy and safety of this technology. In GitLab's [2023 State of AI in Software Development report](https://about.gitlab.com/developer-survey/#ai), more than 75% of respondents expressed concern about AI tools having access to private information or intellectual property. \n\n[Transparency is a core value at GitLab](https://handbook.gitlab.com/handbook/values/#transparency), and we take a transparency- and privacy-first approach to building our AI features to help ensure that our customers’ valuable intellectual property is protected. 
Accordingly, we’ve launched our [AI Transparency Center](https://about.gitlab.com/ai-transparency-center/) to help GitLab’s customers, community, and team members better understand the ways in which GitLab upholds ethics and transparency in our AI-powered features.\n\nThe AI Transparency Center includes GitLab’s [AI Ethics Principles for Product Development](https://handbook.gitlab.com/handbook/legal/ethics-compliance-program/ai-ethics-principles/), [AI Continuity Plan](https://handbook.gitlab.com/handbook/product/ai/continuity-plan/), and our [AI features documentation](https://docs.gitlab.com/ee/user/ai_features.html).\n\n## The AI Ethics Principles for Product Development explained\n\nWe believe ethics play an important role in building AI features. For this reason, we’ve launched GitLab’s [AI Ethics Principles for Product Development](https://handbook.gitlab.com/handbook/legal/ethics-compliance-program/ai-ethics-principles/) to address what we consider to be the best practices in responsible AI development. These Principles will help guide GitLab as we continue to build and evolve our AI functionality.\n\nThe Principles specifically address five key areas of concern that GitLab monitors so that we can continue to responsibly integrate AI into our customers’ workflows:\n\n- **Avoiding unfair bias.** [Diversity, Inclusion, and Belonging](https://about.gitlab.com/company/culture/inclusion/) is also one of GitLab’s core values. It is a critical consideration when building features powered by AI systems, as there is [evidence](https://fra.europa.eu/en/publication/2022/bias-algorithm) that AI systems may perpetuate human and societal biases. GitLab will continue to prioritize Diversity, Inclusion, and Belonging when building AI features.\n\n- **Safeguarding against security risks.** GitLab is a DevSecOps platform, which means we integrate security throughout our entire product, including in our AI features. 
While AI brings many potential security benefits, it can also create security risks if not deployed correctly. As we do with all of our features, our goal is to mitigate these security risks in GitLab’s AI features.\n\n- **Preventing potentially harmful uses.** We strive to build AI features responsibly. We try to carefully consider the potential consequences of our AI features in order to refrain from launching features that are likely to cause, or allow others to cause, overall harm.\n\n- **Considering what data our AI features use and how they use it.** We will continue to carefully evaluate the data that our AI features use, the purposes for which we’re using this data, and who owns the intellectual property and other rights to the data, just as we do with all of GitLab’s features.\n\n- **Holding ourselves accountable.** GitLab’s mission is to make it so that [everyone can contribute](https://about.gitlab.com/company/mission/), and we welcome feedback from the GitLab community about our AI features. We will in turn aim to share our AI ethics-related findings with others in the industry where possible. We also know that AI systems, and the risk mitigations we need to employ with them, will change over time, so we are committed to continuously reviewing and iterating on our AI features and these Principles.\n\n## The AI Continuity Plan explained\n\nUnlike other DevSecOps platforms, GitLab is not tied to a single AI model provider. 
Instead, our AI features are powered by a diverse set of models, which helps us support a wide range of use cases and gives our customers flexibility.\n\nWe carefully select our third-party AI vendors to ensure a commitment from the vendor that they will forgo the use of GitLab and GitLab customers’ content for the developing, training, and fine tuning of vendor models.\n\nOur new [AI Continuity Plan](https://handbook.gitlab.com/handbook/product/ai/continuity-plan/) lays out GitLab’s processes when reviewing and selecting new third-party AI vendors, and when these AI vendors materially change their practices with respect to customer data.\n\n## AI features documentation \n\nIn keeping with GitLab’s core Transparency value, our [AI features documentation](https://docs.gitlab.com/ee/user/ai_features.html) clearly outlines our AI features’ purposes, underlying models, statuses, and privacy practices.\n\n## Visit the AI Transparency Center\n\nThe [AI Transparency Center](https://about.gitlab.com/ai-transparency-center/) is publicly available in keeping with our [Transparency value](https://handbook.gitlab.com/handbook/values/#transparency) and to encourage others in the AI industry and the GitLab community to take safety, privacy, and ethics into account when building their own AI-powered functionality.\n\nWe’re excited about the opportunities that responsible AI will bring, and will continue to build our AI features with ethics, privacy, and transparency in mind.\n",[9,1082,1181],{"slug":2345,"featured":6,"template":693},"introducing-the-gitlab-ai-transparency-center","content:en-us:blog:introducing-the-gitlab-ai-transparency-center.yml","Introducing The Gitlab Ai Transparency 
Center","en-us/blog/introducing-the-gitlab-ai-transparency-center.yml","en-us/blog/introducing-the-gitlab-ai-transparency-center",{"_path":2351,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2352,"content":2358,"config":2364,"_id":2366,"_type":14,"title":2367,"_source":16,"_file":2368,"_stem":2369,"_extension":19},"/en-us/blog/introducing-the-source-insights-for-the-future-of-software-development",{"title":2353,"description":2354,"ogTitle":2353,"ogDescription":2354,"noIndex":6,"ogImage":2355,"ogUrl":2356,"ogSiteName":706,"ogType":707,"canonicalUrls":2356,"schema":2357},"Introducing The Source: Insights for the future of software development","Explore our new publication for transformative software development strategies and expert advice on emerging technologies.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674616/Blog/Hero%20Images/blog-image-template-1800x945__1_.png","https://about.gitlab.com/blog/introducing-the-source-insights-for-the-future-of-software-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing The Source: Insights for the future of software development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chandler Gibbons\"}],\n        \"datePublished\": \"2024-10-29\",\n      }",{"title":2353,"description":2354,"authors":2359,"heroImage":2355,"date":2361,"body":2362,"category":1181,"tags":2363},[2360],"Chandler Gibbons","2024-10-29","Modern software development is transforming the way organizations create, deliver, and scale business value. Teams must be able to build solutions quickly and efficiently while navigating rising security threats, emerging technologies, and increasingly complex compliance demands.\n\nToday, GitLab is launching [The Source](https://about.gitlab.com/the-source/), a new publication that covers the evolution of software development as an engine for business success. 
We offer regular insights into the future of software development, supported by original research and analysis from our subject matter experts and thought leaders.\n\nOn The Source, you will find answers to questions such as:\n* How can leaders measure the ROI of AI across the software development lifecycle?\n* What’s the best way to ensure security and compliance across the entire software supply chain?\n* What types of efficiencies will teams see from platform and toolchain consolidation?\n\nHere’s a sample of what's on The Source today:\n\n**4 steps for measuring the impact of AI**\n\n\"Evaluating the productivity of AI-enhanced coding requires a more nuanced approach than traditional metrics such as lines of code, code commits, or task completion. It necessitates shifting the focus to real-world business outcomes that balance development speed, software quality, and security.\"\n- [Learn the 4 steps from AI expert Taylor McCaslin.](https://about.gitlab.com/the-source/ai/4-steps-for-measuring-the-impact-of-ai/)\n\n**Addressing the root cause of common security frustrations**\n\n\"DevSecOps promises better integration between engineering and security, but it’s clear that frustrations and misalignment persist. 
That’s because these challenges are symptoms of a larger problem with how organizations view security, as well as how teams work together and how they allocate time to security.\"\n- [Solve this disconnect with expert advice from GitLab CISO Josh Lemos.](https://about.gitlab.com/the-source/security/security-its-more-than-culture-addressing-the-root-cause-of-common-security/)\n\n**Driving business results with platform engineering**\n\n\"Platform engineering aims to normalize and standardize developer workflows by providing developers with optimized 'golden paths' for most of their workloads and flexibility to define exceptions for the rest.\"\n- [Discover GitLab Field CTO Brian Wald's best practices for platform engineering success.](https://about.gitlab.com/the-source/platform/driving-business-results-with-platform-engineering/)\n\n## Make The Source your decision-making partner\n\nVisit [The Source](https://about.gitlab.com/the-source/) today to explore the latest insights, get answers to your leadership questions, and learn something new to share with your teams. You can also subscribe to our newsletter for regular updates directly to your inbox. 
Join our community of forward-thinking technology leaders and help shape the future of software development.",[9,691,1181,716],{"slug":2365,"featured":91,"template":693},"introducing-the-source-insights-for-the-future-of-software-development","content:en-us:blog:introducing-the-source-insights-for-the-future-of-software-development.yml","Introducing The Source Insights For The Future Of Software Development","en-us/blog/introducing-the-source-insights-for-the-future-of-software-development.yml","en-us/blog/introducing-the-source-insights-for-the-future-of-software-development",{"_path":2371,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2372,"content":2378,"config":2383,"_id":2385,"_type":14,"title":2386,"_source":16,"_file":2387,"_stem":2388,"_extension":19},"/en-us/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment",{"title":2373,"description":2374,"ogTitle":2373,"ogDescription":2374,"noIndex":6,"ogImage":2375,"ogUrl":2376,"ogSiteName":706,"ogType":707,"canonicalUrls":2376,"schema":2377},"Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment","Learn how to migrate from Jenkins to the integrated CI/CD of the GitLab DevSecOps Platform to deliver high-quality software rapidly.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663000/Blog/Hero%20Images/tanukilifecycle.png","https://about.gitlab.com/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-11-01\",\n      }",{"title":2373,"description":2374,"authors":2379,"heroImage":2375,"date":2380,"body":2381,"category":1201,"tags":2382},[774],"2023-11-01","\nIn today's dynamic landscape of software development, certain 
requirements have become paramount for delivering high-quality software rapidly. These requirements include the need for cloud compatibility, faster development cycles, improved collaboration, containerization, enhanced development experiences, and the integration of AI-driven capabilities for better efficiency and speed. Jenkins, a longstanding and respected continuous integration (CI) tool, has admirably played a role in many teams' software development for years. However, as more teams adopt DevOps/DevSecOps strategies for their software delivery, leveraging the integrated CI that is available in a DevSecOps platform like GitLab can provide benefits that Jenkins does not. \n\nSome organizations find themselves hesitating to migrate, not because they doubt the benefits of a top-tier [CI/CD](https://about.gitlab.com/topics/ci-cd/) solution such as GitLab, but due to the complexities of their existing Jenkins implementations. It's understandable that such a transition can seem daunting. \n\nIn this blog, you'll find several migration strategies to help transition from Jenkins to GitLab and make the process smoother and more manageable.\n\n## Migrating to GitLab\nIt's become evident that for organizations seeking a CI/CD solution that can seamlessly support their evolving demands, GitLab emerges as a powerful game-changer. Let's explore why transitioning to this advanced platform is transformative for Jenkins users.\n\n### Why migrate to GitLab \nBefore we delve into the migration approaches, let's take a moment to understand GitLab CI and what makes it a compelling choice for modern CI/CD needs.\n\n> Try GitLab CI/CD today with [a free trial of Ultimate](https://gitlab.com/-/trials/new).\n\n### GitLab CI overview\nGitLab CI is an integral part of the GitLab [AI-powered](https://about.gitlab.com/gitlab-duo/) DevSecOps Platform, which offers a comprehensive and unified solution for DevSecOps and CI/CD. 
GitLab's design revolves around streamlining development workflows, fostering collaboration, enhancing security, and ensuring scalability.\n\n### Key features of GitLab CI\nThese are the key features of GitLab CI:\n- **Unified platform:** GitLab CI is more than just a CI/CD tool; it's part of a broader ecosystem that includes source code management, project management, security features, analytics and more. This unified platform streamlines workflows and enhances collaboration among development teams.\n- **Containerization and orchestration:** GitLab CI/CD is designed with containerization in mind, offering native support for Docker and Kubernetes. This enables seamless integration of container technologies into your CI/CD pipelines.\n- **Security by design:** Security is a top priority, and GitLab CI incorporates features such as static code analysis and vulnerability scanning to help teams identify and address security issues early in the development process.\n- **GitOps principles:** GitLab CI aligns with [GitOps principles](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/), emphasizing version-controlled, declarative configurations for infrastructure and application deployments. 
This approach enhances the reliability and repeatability of deployments.\n\nGet familiar with GitLab CI with this tutorial:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/WKR-7clknsA?si=T21Fe10Oa0rQ0SGB\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWith that understanding of GitLab CI's capabilities, let's explore the migration steps and strategies for Jenkins users looking to leverage the benefits of GitLab CI.\n\n## A recommended step-by-step Jenkins-to-GitLab CI migration\nWhen considering a migration from Jenkins to GitLab CI, we strongly recommend following a well-structured, step-by-step approach to ensure a seamless transition. Here's our recommended process:\n1. **Pipeline assessment:** Start by conducting a comprehensive inventory of all your existing pipelines in Jenkins. This initial step will help you gain a clear understanding of the scope and complexity of the migration.\n2. **Parallel migration:** Begin the migration process by selecting individual pipelines and moving them to GitLab CI one at a time. Continue to maintain the use of Jenkins for your ongoing work during this transition to minimize disruptions.\n3. **Code verification:** We advise beginning with verification checks in CI. Run both the Jenkins and GitLab CI pipelines in parallel. This dual approach allows you to directly compare the two workflows and identify any issues in the new GitLab workflows. During this phase, keep the GitLab workflow as an optional choice while Jenkins remains required.\n4. **Continuous validation:** After running both pipelines in parallel for a full iteration, thoroughly evaluate the outcomes from each pipeline. This evaluation should consider various factors, including status codes, logs, and performance. \n5. 
**GitLab CI transition:** As you gain confidence in the reliability and effectiveness of GitLab CI through the parallel runs, make the transition to the GitLab CI workflow as the required standard while Jenkins continues to operate in the background.\n6. **Jenkins phaseout:** After a second iteration, when you are confident in the performance and stability of GitLab CI, you can begin to remove the Jenkins job from your code verification pipeline. This successful transition will enable you to retire Jenkins from this particular aspect of your CI/CD process.\n\nThis recommended approach ensures that your migration is a gradual evolution, allowing you to identify and address any issues or discrepancies before fully committing to GitLab CI. Running Jenkins and GitLab CI pipelines in parallel provides valuable insights and ensures the effective streamlining of your CI/CD processes.\n\n## Preparing for migration: Training and communication\nTo ensure a smooth and successful migration from Jenkins to GitLab CI, follow these essential steps:\n- **Stakeholder communication:** Start by announcing your migration plans and timelines to all relevant stakeholders. This includes DevOps teams, developers, and QA engineers. 
Transparency in communication is crucial to ensure that everyone understands the objectives and expectations of the migration.\n- **Knowledge-level training:** Conduct knowledge-level training sessions for your teams to promote GitLab CI adoption.\nCover topics such as using GitLab CI, understanding the YAML syntax, and how to create a basic pipeline.\nProvide team members with the knowledge and skills necessary to navigate the new GitLab CI environment effectively.\n- **Hands-on learning:** Encourage hands-on learning by pairing up developers.\nCreate opportunities for them to learn from each other's experiences throughout the migration process.\n\nBy following these instructions for training and communication, you'll build a strong foundation for a successful migration, empowering your teams to adapt and thrive in the new environment.\n\n## 3 Jenkins-to-GitLab CI migration strategies\nThere are different strategies to consider. These three strategies offer flexibility, allowing organizations to choose the path that best aligns with their specific needs and resources. Let's explore these strategies in detail to help you make an informed decision about which one suits your organization best.\n\n### Migration Strategy 1: Using GitLab CI for new projects\nThe first migration strategy involves a gradual transition. While you maintain your existing Jenkins infrastructure for ongoing projects, you introduce GitLab CI for new projects. This approach allows you to harness the modern features of GitLab CI without disrupting your current work.\n\n#### Benefits of Migration Strategy 1\nThe benefits of this approach include the following:\n- New projects can leverage GitLab CI's advanced features right from the start. 
\n- This strategy minimizes the risk of disrupting existing workflows, as your existing Jenkins setup remains intact.\n- Your team can gradually adapt to GitLab CI, building confidence and expertise without the pressure of an immediate full-scale migration.\n\n#### Challenges of Migration Strategy 1\nThe challenges of this approach include the following:\n- Operating two CI/CD platforms simultaneously can introduce complexity, especially in terms of integration and team collaboration.\n- Managing projects on different platforms may require careful coordination to ensure consistency in processes and security practices.\n\nThis strategy offers a smooth and manageable transition by allowing you to harness GitLab CI's strengths for new projects, while your existing Jenkins infrastructure continues to support ongoing work.\n\n### Migration Strategy 2: Migrating only strategic projects\nIn this strategy, you identify specific projects within your organization that stand to benefit the most from the capabilities of GitLab CI. 
Instead of preparing for a wholesale migration, you start by focusing your efforts on migrating these strategically selected projects first.\n\n#### Benefits of Migration Strategy 2\nThe benefits of this approach include the following:\n- By concentrating on key projects, you can realize significant improvements in those areas where GitLab CI aligns with specific needs.\n- This approach reduces the complexity of migrating everything at once, minimizing the potential for disruptions.\n- You can gradually build confidence with GitLab CI and its benefits before considering further migrations.\n\n#### Challenges of Migration Strategy 2\nThe challenges of this approach include the following:\n- Even though you're not migrating all projects, the chosen projects' migration can still be intricate and require careful planning.\n- Ensuring seamless collaboration between projects on different platforms may require additional attention.\n\nThis strategy allows you to maximize the impact of GitLab CI by focusing on strategic areas, minimizing risk, and gradually gaining experience with the new tool.\n\n### Migration Strategy 3: Migrating everything\nThe third strategy is a comprehensive migration where you commit to moving all your CI/CD processes, projects, and workflows to GitLab CI. This approach aims for uniformity and simplification of CI/CD across all projects. This strategy can benefit from taking an iterative approach. Consider starting with new projects, followed by migrating strategic projects, and then leverage your growing knowledge and experience with GitLab CI to complete the migration of remaining projects. 
\n\n#### Benefits of Migration Strategy 3\nThe benefits of this approach include the following:\n- Uniform CI/CD processes across all projects can streamline administration and maintenance, reducing complexity.\n- You can take full advantage of GitLab CI's modern capabilities, from Infrastructure as Code to enhanced security features.\n- As your projects grow, GitLab CI is designed to handle increased demands, ensuring long-term scalability.\n\n#### Challenges of Migration Strategy 3\nThe challenges of this approach include the following:\n- A full-scale migration can be intricate, requiring meticulous planning and implementation.\n- The transition may disrupt ongoing projects and require a significant time investment.\n- Investment in training and potential tool migration expenses should be considered.\n\nOpt for this approach if uniformity and consolidation of CI/CD processes are a high priority, and you have the resources to execute a full migration.\n\nThe migration strategy you select should align with your organization's specific needs and circumstances. In all cases, the ultimate goal is to enhance your development process with modern CI/CD tools like GitLab CI, which offers scalability, infrastructure automation, security, and collaboration features that align with today's development needs.\n\n## Technical insights: How the migration works\nMoving your CI/CD workflows from Jenkins to GitLab CI is a transformative journey, and understanding how it works is vital for a successful transition.\n\n### Understanding the configurations: Jenkinsfile vs. .gitlab-ci.yml\nThe heart of your CI/CD pipeline lies in the configurations defined in your Jenkinsfile (for Jenkins) and .gitlab-ci.yml (for GitLab CI). 
While there are some similarities between these configuration files, there are notable differences as well.\n\n#### Similarities\n- Both files define the stages, jobs, and steps of your CI/CD process.\n- You specify the desired build, test, and deployment steps in both files.\n- Environment variables and settings can be configured in either file.\n\n#### Differences\n- Jenkinsfile uses Groovy for scripting, while .gitlab-ci.yml uses YAML. This change in language affects the way you write and structure your configurations.\n- The process of defining pipelines is more intuitive in .gitlab-ci.yml, with a cleaner, more human-readable syntax.\n- GitLab CI provides a wide range of built-in templates and predefined jobs, simplifying configuration and reducing the need for custom scripting.\n\n### Manually converting the pipeline configuration\nCurrently, migrating your existing Jenkins pipelines to GitLab CI is typically done manually. This means analyzing your Jenkinsfile and re-creating the equivalent configurations in .gitlab-ci.yml. While there are similarities in the concepts and structure, the differences in syntax and the specific capabilities of each platform require careful consideration during the migration.\n\n## Strategic planning for a smooth transition\nMigrating from Jenkins to GitLab CI requires meticulous planning to ensure a seamless transition. It's crucial to assess the disparities between the two systems and evaluate their impact on your workflow, considering aspects like security, cost, time, and capacity.\n\nOnce you've identified these differences and devised your migration strategy, break down the migration into key steps. These include setting up GitLab CI pipelines, securely transferring data from Jenkins to GitLab CI, and integrating GitLab CI into your existing tools and processes. 
\n\n## Case study: A seamless transition for Lockheed Martin\nLet's look at a real-world case study to illustrate the effectiveness of the \"Migrate Everything\" strategy. [Lockheed Martin](https://about.gitlab.com/customers/lockheed-martin/), the world’s largest defense contractor, had been using Jenkins for several years. As their project portfolio expanded, they realized that their Jenkins implementation with a wide variety of DevOps tools was becoming increasingly complex to manage. They were also eager to adopt modern CI/CD capabilities that Jenkins struggled to provide.\n\nIn collaboration with GitLab, Lockheed Martin decided to undertake a comprehensive migration to GitLab CI. Their goals included achieving consistency in their CI/CD processes, simplifying administration and maintenance, and taking full advantage of The GitLab Platform’s robust features.\n\nThe comprehensive migration strategy proved to be a resounding success for Lockheed Martin. With GitLab CI, they not only streamlined their CI/CD processes but achieved remarkable results. **They managed to run CI pipeline builds a staggering 80 times faster, retired thousands of Jenkins servers, and reduced the time spent on system maintenance by a staggering 90%. This monumental shift resulted in a significant increase in efficiency and productivity for Lockheed Martin.**\n\nThis case study showcases how a comprehensive migration strategy can be effective for organizations looking to leverage GitLab capabilities across all their projects.\n\nFor more in-depth insights into Lockheed Martin's successful transition to GitLab and how it streamlined their software development processes, check out [the detailed case study](https://about.gitlab.com/customers/lockheed-martin/).\n\n## GitLab documentation and support\nFor those embarking on this migration journey, GitLab offers documentation to guide you through the process. 
You can find valuable resources in GitLab's [official documentation](https://docs.gitlab.com/ee/ci/migration/jenkins.html).\n\nIn addition to documentation, GitLab's Professional Services team is available to assist organizations in their migrations. They bring expertise and experience to ensure a smooth transition. Whether it's understanding the nuances of Jenkinsfile to .gitlab-ci.yml conversion or optimizing your CI/CD workflows, their support can be invaluable.\n\n> Try GitLab CI/CD today with [a free trial of Ultimate](https://gitlab.com/-/trials/new).\n",[737,279,9,233,716,917],{"slug":2384,"featured":6,"template":693},"jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment","content:en-us:blog:jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment.yml","Jenkins Gitlab Ultimate Guide To Modernizing Cicd Environment","en-us/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment.yml","en-us/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment",{"_path":2390,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2391,"content":2397,"config":2403,"_id":2405,"_type":14,"title":2406,"_source":16,"_file":2407,"_stem":2408,"_extension":19},"/en-us/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions",{"title":2392,"description":2393,"ogTitle":2392,"ogDescription":2393,"noIndex":6,"ogImage":2394,"ogUrl":2395,"ogSiteName":706,"ogType":707,"canonicalUrls":2395,"schema":2396},"Learn advanced Rust programming with a little help from AI","Use this guided tutorial, along with AI-powered GitLab Duo Code Suggestions, to continue learning advanced Rust programming.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662439/Blog/Hero%20Images/codewithheart.png","https://about.gitlab.com/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": 
\"Learn advanced Rust programming with a little help from AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-10-12\",\n      }",{"title":2392,"description":2393,"authors":2398,"heroImage":2394,"date":2399,"body":2400,"category":10,"tags":2401},[1262],"2023-10-12","When I started learning a new programming language more than 20 years ago,\nwe had access to the Visual Studio 6 MSDN library, installed from 6 CD-ROMs.\nAlgorithms with pen and paper, design pattern books, and MSDN queries to\nfigure out the correct type were often time-consuming. Learning a new\nprogramming language changed fundamentally in the era of remote\ncollaboration and artificial intelligence (AI). Now you can spin up a\n[remote development\nworkspace](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/),\nshare your screen, and engage in a group programming session. With the help\nof [GitLab Duo Code Suggestions](/gitlab-duo/), you always have an\nintelligent partner at your fingertips. Code Suggestions can learn from your\nprogramming style and experience. 
They only need input and context to\nprovide you with the most efficient suggestions.\n\n\nIn this tutorial, we build on the [getting started blog\npost](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/)\nand design and create a simple feed reader application.\n\n\n- [Preparations](#preparations)\n    - [Code Suggestions](#code-suggestions)\n- [Continue learning Rust](#continue-learning-rust)\n    - [Hello, Reader App](#hello-reader-app)\n    - [Initialize project](#initialize-project)\n    - [Define RSS feed URLs](#define-rss-feed-urls)\n- [Modules](#modules)\n    - [Call the module function in main()](#call-the-module-function-in-main)\n- [Crates](#crates)\n    - [feed-rs: parse XML feed](#feed-rs-parse-xml-feed)\n- [Runtime configuration: Program\narguments](#runtime-configuration-program-arguments)\n    - [User input error handling](#user-input-error-handling)\n- [Persistence and data storage](#persistence-and-data-storage)\n\n- [Optimization](#optimization)\n    - [Asynchronous execution](#asynchronous-execution)\n    - [Spawning threads](#spawning-threads)\n    - [Function scopes, threads, and closures](#function-scopes-threads-and-closures)\n- [Parse feed XML into objects](#parse-feed-xml-into-object-types)\n    - [Map generic feed data types](#map-generic-feed-data-types)\n    - [Error handling with Option::unwrap()](#error-handling-with-option-unwrap)\n- [Benchmarks](#benchmarks)\n    - [Sequential vs. 
Parallel execution benchmark](#sequential-vs-parallel-execution-benchmark)\n    - [CI/CD with Rust caching](#cicd-with-rust-caching)\n- [What is next](#what-is-next)\n    - [Async learning exercises](#async-learning-exercises)\n    - [Share your feedback](#share-your-feedback)\n\n## Preparations\n\nBefore diving into the source code, make sure to set up [VS\nCode](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/#vs-code)\nand [your development environment with\nRust](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/#development-environment-for-rust).\n\n\n### Code Suggestions\n\nFamiliarize yourself with suggestions before actually verifying the\nsuggestions. GitLab Duo Code Suggestions are provided as you type, so you do\nnot need use specific keyboard shortcuts. To accept a code suggestion, press\nthe `tab` key. Also note that writing new code works more reliably than\nrefactoring existing code. AI is non-deterministic, which means that the\nsame suggestion may not be repeated after deleting the code suggestion.\nWhile Code Suggestions is in Beta, we are working on improving the accuracy\nof generated content overall. Please review the [known\nlimitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#known-limitations),\nas this could affect your learning experience.\n\n\n**Tip:** The latest release of Code Suggestions supports multi-line\ninstructions. You can refine the specifications to your needs to get better\nsuggestions.\n\n\n```rust\n    // Create a function that iterates over the source array\n    // and fetches the data using HTTP from the RSS feed items.\n    // Store the results in a new hash map.\n    // Print the hash map to the terminal.\n```\n\n\nThe VS Code extension overlay is shown when offering a suggestion. You can\nuse the `tab` key to accept the suggested line(s), or `cmd cursor right` to\naccept one word. 
Additionally, the three dots menu allows you to always show\nthe toolbar.\n\n\n![VS Code GitLab Duo Code Suggestions overlay with\ninstructions](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_code_suggestions_options_overlay_keep_toolbar.png){:\n.shadow}\n\n\n## Continue learning Rust\n\nNow, let us continue learning Rust, which is one of the [supported languages\nin Code\nSuggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#supported-languages).\n[Rust by Example](https://doc.rust-lang.org/rust-by-example/) provides an\nexcellent tutorial for beginners, together with the official [Rust\nbook](https://doc.rust-lang.org/book/). Both resources are referenced\nthroughout this blog post.\n\n\n### Hello, Reader App\n\nThere are many ways to create an application and learn Rust. Some of them\ninvolve using existing Rust libraries - so-called `Crates`. We will use them\na bit further into the blog post. For example, you could create a\ncommand-line app that processes images and writes the result to a file.\nSolving a classic maze or writing a Sudoku solver can also be a fun\nchallenge. Game development is another option. The book [Hands-on\nRust](https://hands-on-rust.com/) provides a thorough learning path by\ncreating a dungeon crawler game. My colleague Fatima Sarah Khalid started\nthe [Dragon Realm in C++ with a little help from\nAI](/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions/)\n-- check it out, too.\n\n\nHere is a real use case that helps solve an actual problem: Collecting\nimportant information from different sources into RSS feeds for (security)\nreleases, blog posts, and social discussion forums like Hacker News. Often,\nwe want to filter for specific keywords or versions mentioned in the\nupdates. These requirements allow us to formulate a requirements list for\nour application:\n\n\n1. 
Fetch data from different sources (HTTP websites, REST API, RSS feeds).\nRSS feeds in the first iteration.\n\n1. Parse the data.\n\n1. Present the data to the user, or write it to disk.\n\n1. Optimize performance.\n\n\nThe following example application output will be available after the\nlearning steps in this blog post:\n\n\n![VS Code Terminal, cargo run with formatted feed entries\noutput](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_cargo_run_formatted_output_final.png)\n\n\nThe application should be modular and build the foundation to add more data\ntypes, filters, and hooks to trigger actions at a later point.\n\n\n### Initialize project\n\nReminder: `cargo init` in the project root creates the file structure,\nincluding the `main()` entrypoint. Therefore, we will learn how to create\nand use Rust modules in the next step.\n\n\nCreate a new directory called `learn-rust-ai-app-reader`, change into it and\nrun `cargo init`. This command implicitly runs `git init` to initialize a\nnew Git repository locally. The remaining step is to configure the Git\nremote repository path, for example,\n`https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader`.\nPlease adjust the path for your namespace. Pushing the Git repository\n[automatically creates a new private project in\nGitLab](https://docs.gitlab.com/ee/user/project/#create-a-new-project-with-git-push).\n\n\n```shell\n\nmkdir learn-rust-ai-app-reader\n\ncd learn-rust-ai-app-reader\n\n\ncargo init\n\n\ngit remote add origin\nhttps://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader.git\n\ngit push --set-upstream origin main\n\n```\n\n\nOpen VS Code from the newly created directory. 
The `code` CLI will spawn a\nnew VS Code window on macOS.\n\n\n```shell\n\ncode .\n\n```\n\n\n### Define RSS feed URLs\n\nAdd a new hashmap to store the RSS feed URLs inside the `src/main.rs` file\nin the `main()` function. You can instruct GitLab Duo Code Suggestions with\na multi-line comment to create a\n[`HashMap`](https://doc.rust-lang.org/stable/std/collections/struct.HashMap.html)\nobject, and initialize it with default values for Hacker News, and\nTechCrunch. Note: Verify that the URLs are correct when you get suggestions.\n\n\n```rust\n\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n\n}\n\n```\n\n\nNote that the code comment provides instructions for:\n\n\n1. The variable name `rss_feeds`.\n\n2. The `HashMap` type.\n\n3. Initial seed key/value pairs.\n\n4. String as type (can be seen with `to_string()` calls).\n\n\nOne possible suggested path can be as follows:\n\n\n```rust\n\nuse std::collections::HashMap;\n\n\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n    let rss_feeds = HashMap::from([\n        (\"Hacker News\".to_string(), \"https://news.ycombinator.com/rss\".to_string()),\n        (\"TechCrunch\".to_string(), \"https://techcrunch.com/feed/\".to_string()),\n    ]);\n\n}\n\n```\n\n\n![VS Code with Code Suggestions for RSS feed URLs for Hacker News and\nTechCrunch](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_main_array_rss_feed_urls_suggested.png)\n\n\nOpen a new terminal in VS Code (cmd shift p - search for `terminal`), and\nrun `cargo build` to build the changes. The error message instructs you to\nadd the `use std::collections::HashMap;` import.\n\n\nThe next step is to do something with the RSS feed URLs. 
[The previous blog\npost](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/)\ntaught us to split code into functions. We want to organize the code more\nmodularly for our reader application, and use Rust modules.\n\n\n## Modules\n\n[Modules](https://doc.rust-lang.org/rust-by-example/mod.html) help with\norganizing code. They can also be used to hide functions into the module\nscope, limiting access to them from the main() scope. In our reader\napplication, we want to fetch the RSS feed content, and parse the XML\nresponse. The `main()` caller should only be able to access the\n`get_feeds()` function, while other functionality is only available in the\nmodule.\n\n\nCreate a new file `feed_reader.rs` in the `src/` directory. Instruct Code\nSuggestions to create a public module named `feed_reader`, and a public\nfunction `get_feeds()` with a String HashMap as input. Important: The file\nand module names need to be the same, following the [Rust module\nstructure](https://doc.rust-lang.org/book/ch07-02-defining-modules-to-control-scope-and-privacy.html).\n\n\n![Code Suggestions: Create public module, with function and input\ntypes](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_public_module_function_input.png){:\n.shadow}\n\n\nInstructing Code Suggestions with the input variable name and type will also\nimport the required `std::collections::HashMap` module. 
Tip: Experiment with\nthe comments, and refine the variable types to land the best results.\nPassing function parameters as object references is considered best practice\nin Rust, for example.\n\n\n```rust\n\n// Create public module feed_reader\n\n// Define get_feeds() function which takes rss_feeds as String HashMap\nreference as input\n\npub mod feed_reader {\n    use std::collections::HashMap;\n\n    pub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n        // Do something with the RSS feeds\n    }\n}\n\n```\n\n\n![Code Suggestions: Public module with `get_feeds()` function, and suggested\ninput\nvariable](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_public_module_function_input.png){:\n.shadow}\n\n\nInside the function, continue to instruct Code Suggestions with the\nfollowing steps:\n\n\n1. `// Iterate over the RSS feed URLs`\n\n2. `// Fetch URL content`\n\n3. `// Parse XML body`\n\n4. `// Print the result`\n\n\n![Code Suggestions: Public module with `get_feeds()` function, step 1:\nIterate](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_01_iterate.png){:\n.shadow}\n\n\n![Code Suggestions: Public module with `get_feeds()` function, step 2: Fetch\nURL\ncontent](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_02_fetch_content.png){:\n.shadow}\n\n\n![Code Suggestions: Public module with `get_feeds()` function, step 3: Parse\nXML\nbody](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_03_parse_body.png){:\n.shadow}\n\n\n![Code Suggestions: Public module with `get_feeds()` function, step 4: 
Print\nthe\nresults](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_04_print_result.png){:\n.shadow}\n\n\nThe following code can be suggested:\n\n\n```rust\n\n// Create public module feed_reader\n\n// Define get_feeds() function which takes rss_feeds as String HashMap\nreference as input\n\npub mod feed_reader {\n    use std::collections::HashMap;\n\n    pub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n        // Iterate over the RSS feed URLs\n        for (name, url) in rss_feeds {\n            println!(\"{}: {}\", name, url);\n\n            // Fetch URL content\n            let body = reqwest::blocking::get(url).unwrap().text().unwrap();\n\n            // Parse XML body\n            let parsed_body = roxmltree::Document::parse(&body).unwrap();\n\n            // Print the result\n            println!(\"{:#?}\", parsed_body);\n        }\n    }\n}\n\n```\n\n\nYou see a new keyword here:\n[`unwrap()`](https://doc.rust-lang.org/rust-by-example/error/option_unwrap.html).\nRust does not support `null` values, and uses the [`Option`\ntype](https://doc.rust-lang.org/rust-by-example/std/option.html) for any\nvalue. If you are certain to use a specific wrapped type, for example,\n`Text` or `String`, you can call the `unwrap()` method to get the value. The\n`unwrap()` method will panic if the value is `None`.\n\n\n**Note** Code Suggestions referred to the `reqwest::blocking::get` function\nfor the `// Fetch URL content` comment instruction. The [`reqwest`\ncrate](https://docs.rs/reqwest/latest/reqwest/) name is intentional and not\na typo. It provides a convenient, higher-level HTTP client for async and\nblocking requests.\n\n\nParsing the XML body is tricky - you might get different results, and the\nschema is not the same for every RSS feed URL. 
Let us try to call the\n`get_feeds()` function, and then work on improving the code.\n\n\n### Call the module function in main()\n\n\nThe main() function does not know about the `get_feeds()` function yet, so\nwe need to import its module. In other programming languages, you might have\nseen the keywords `include` or `import`. The Rust module system is\ndifferent.\n\n\nModules are organized in path directories. In our example, both source files\nexist on the same directory level. `feed_reader.rs` is interpreted as crate,\ncontaining one module called `feed_reader`, which defines the function\n`get_feeds()`.\n\n\n```\n\nsrc/\n  main.rs\n  feed_reader.rs\n```\n\n\nIn order to access `get_feeds()` from the `feed_reader.rs` file, we need to\n[bring module\npath](https://doc.rust-lang.org/book/ch07-04-bringing-paths-into-scope-with-the-use-keyword.html)\ninto the `main.rs` scope first, and then call the full function path.\n\n\n```rust\n\nmod feed_reader;\n\n\nfn main() {\n\n    feed_reader::feed_reader::get_feeds(&rss_feeds);\n\n```\n\n\nAlternatively, we can import the full function path with the `use` keyword,\nand later use the short function name.\n\n\n```rust\n\nmod feed_reader;\n\nuse feed_reader::feed_reader::get_feeds;\n\n\nfn main() {\n\n    get_feeds(&rss_feeds);\n\n```\n\n\n**Tip:** I highly recommend reading the [Clear explanation of the Rust\nmodule system blog\npost](https://www.sheshbabu.com/posts/rust-module-system/) to get a better\nvisual understanding.\n\n\n```diff\n\n\nfn main() {\n    // ...\n\n    // Print feed_reader get_feeds() output\n    println!(\"{}\", feed_reader::get_feeds(&rss_feeds));\n```\n\n\n```rust\n\nuse std::collections::HashMap;\n\n\nmod feed_reader;\n\n// Alternative: Import full function path\n\n//use feed_reader::feed_reader::get_feeds;\n\n\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n    let rss_feeds = 
HashMap::from([\n        (\"Hacker News\".to_string(), \"https://news.ycombinator.com/rss\".to_string()),\n        (\"TechCrunch\".to_string(), \"https://techcrunch.com/feed/\".to_string()),\n    ]);\n\n    // Call get_feeds() from feed_reader module\n    feed_reader::feed_reader::get_feeds(&rss_feeds);\n    // Alternative: Imported full path, use short path here.\n    //get_feeds(&rss_feeds);\n}\n\n```\n\n\nRun `cargo build` in the terminal again to build the code.\n\n\n```shell\n\ncargo build\n\n```\n\n\nPotential build errors when Code Suggestions refer to common code and\nlibraries for HTTP requests, and XML parsing:\n\n\n1. Error: `could not find blocking in reqwest`. Solution: Enable the\n`blocking` feature for the crate in `Config.toml`: `reqwest = { version =\n\"0.11.20\", features = [\"blocking\"] }`.\n\n2. Error: `failed to resolve: use of undeclared crate or module reqwest`.\nSolution: Add the `reqwest` crate.\n\n3. Error: `failed to resolve: use of undeclared crate or module roxmltree`.\nSolution: Add the `roxmltree` crate.\n\n\n```shell\n\nvim Config.toml\n\n\nreqwest = { version = \"0.11.20\", features = [\"blocking\"] }\n\n```\n\n\n```shell\n\ncargo add reqwest\n\ncargo add roxmltree\n\n```\n\n\n**Tip:** Copy the error message string, with a leading `Rust \u003Cerror\nmessage>` into your preferred browser to check whether a missing crate is\navailable. Usually this search leads to a result on crates.io and you can\nadd the missing dependencies.\n\n\nWhen the build is successful, run the code with `cargo run` and inspect the\nHacker News RSS feed output.\n\n\n![VS Code terminal, cargo run to fetch Hacker News XML\nfeed](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_fetch_rss_feed_output_hacker_news.png){:\n.shadow}\n\n\nWhat is next with parsing the XML body into human-readable format? 
In the\nnext section, we will learn about existing solutions and how Rust crates\ncome into play.\n\n\n## Crates\n\nRSS feeds share a common set of protocols and specifications. It feels like\nreinventing the wheel to parse XML items and understand the lower object\nstructure. Recommendation for these types of tasks: Look whether someone\nelse had the same problem already and might have created code to solve the\nproblem.\n\n\nReusable library code in Rust is organized in so-called\n[`Crates`](https://doc.rust-lang.org/rust-by-example/crates.html), and made\navailable in packages, and the package registry on crates.io. You can add\nthese dependencies to your project by editing the `Config.toml` in the\n`[dependencies]` section, or using `cargo add \u003Cname>`.\n\n\nFor the reader app, we want to use the [feed-rs\ncrate](https://crates.io/crates/feed-rs). Open a new terminal, and run the\nfollowing command:\n\n\n```shell\n\ncargo add feed-rs\n\n```\n\n\n![VS Code Terminal Terminal: Add crate, verify in\nConfig.toml](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_rust_crate_add_feed-rs_explained.png)\n\n\n### feed-rs: parse XML feed\n\nNavigate into `src/feed_reader.rs` and modify the part where we parse the\nXML body. Code Suggestions understands how to call the `feed-rs` crate\n`parser::parse` function -- there is only one specialty here: `feed-rs`\n[expects string input as raw\nbytes](https://docs.rs/feed-rs/latest/feed_rs/parser/fn.parse_with_uri.html)\nto determine the encoding itself. 
We can provide instructions in the comment\nto get the expected result though.\n\n\n```rust\n            // Parse XML body with feed_rs parser, input in bytes\n            let parsed_body = feed_rs::parser::parse(body.as_bytes()).unwrap();\n```\n\n\n![Code Suggestions: Public module with `get_feeds()` function, step 5:\nModify XML parser to\nfeed-rs](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_05_use_feed_rs_to_parse.png){:\n.shadow}\n\n\nThe benefit of using `feed-rs` is not immediately visible until you see the\nprinted output with `cargo run`: All keys and values are mapped to their\nrespective Rust object types, and can be used for further operations.\n\n\n![VS Code terminal, cargo run to fetch Hacker News XML\nfeed](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_fetch_rss_feed_output_hacker_news_feed_rs.png){:\n.shadow}\n\n\n## Runtime configuration: Program arguments\n\nUntil now, we have run the program with hard-coded RSS feed values compiled\ninto the binary. The next step is allowing to configure the RSS feeds at\nruntime.\n\n\nRust provides [program\narguments](https://doc.rust-lang.org/rust-by-example/std_misc/arg.html) in\nthe standard misc library. [Parsing the\narguments](https://doc.rust-lang.org/rust-by-example/std_misc/arg/matching.html)\nprovides a better and faster learning experience than aiming for advanced\nprogram argument parsers (for example, the\n[clap](https://docs.rs/clap/latest/clap/) crate), or moving the program\nparameters into a configuration file and format\n([TOML](https://toml.io/en/), YAML). 
You are reading these lines after I\ntried and failed with different routes for the best learning experience.\nThis should not stop you from taking the challenge to configure RSS feeds in\nalternative ways.\n\n\nAs a boring solution, the command parameters can be passed as `\"name,url\"`\nstring value pairs, and then are split by the `,` character to extract the\nname and URL values. The comment instructs Code Suggestions to perform these\noperations and extend the `rss_feeds` HashMap with the new values. Note that\nthe variable might not be mutable, and, therefore, needs to be modified to\n`let mut rss_feeds`.\n\n\nNavigate into `src/main.rs` and add the following code to the `main()`\nfunction after the `rss_feeds` variable. Start with a comment to define the\nprogram arguments, and check the suggested code snippets.\n\n\n```rust\n    // Program args, format \"name,url\"\n    // Split value by , into name, url and add to rss_feeds\n```\n\n\n![Code suggestions for program arguments, and splitting name,URL values for\nthe rss_feeds\nvariable](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_program_args_boring_solution.png){:\n.shadow}\n\n\nThe full code example can look like the following:\n\n\n```rust\n\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n    let mut rss_feeds = HashMap::from([\n        (\"Hacker News\".to_string(), \"https://news.ycombinator.com/rss\".to_string()),\n        (\"TechCrunch\".to_string(), \"https://techcrunch.com/feed/\".to_string()),\n    ]);\n\n    // Program args, format \"name,url\"\n    // Split value by , into name, url and add to rss_feeds\n    for arg in std::env::args().skip(1) {\n        let mut split = arg.split(\",\");\n        let name = split.next().unwrap();\n        let url = split.next().unwrap();\n        
rss_feeds.insert(name.to_string(), url.to_string());\n    }\n\n    // Call get_feeds() from feed_reader module\n    feed_reader::feed_reader::get_feeds(&rss_feeds);\n    // Alternative: Imported full path, use short path here.\n    //get_feeds(&rss_feeds);\n}\n\n```\n\n\nYou can pass program arguments directly to the `cargo run` command,\npreceding the arguments with `--`. Enclose all arguments with double quotes,\nput the name followed by a comma and the RSS feed URL as argument. Separate\nall arguments with whitespaces.\n\n\n```\n\ncargo build\n\n\ncargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\"\n\"CNCF,https://www.cncf.io/feed/\"\n\n```\n\n\n![VS Code terminal, RSS feed output example for the GitLab\nblog](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_gitlab_blog_rss_feed_example.png){:\n.shadow}\n\n\n### User input error handling\n\nIf the provided user input does not match the program expectation, we need\nto [throw an error](https://doc.rust-lang.org/rust-by-example/error.html)\nand help the caller to fix the program arguments. For example, passing a\nmalformed URL format should be treated as a runtime error. Instruct Code\nSuggestions with a code comment to throw an error if the URL is not valid.\n\n\n```rust\n    // Ensure that URL contains a valid format, otherwise throw an error\n```\n\n\nOne possible solution is to check if the `url` variable starts with\n`http://` or `https://`. If not, throw an error using the [panic!\nmacro](https://doc.rust-lang.org/rust-by-example/std/panic.html). 
The full\ncode example looks like the following:\n\n\n```rust\n    // Program args, format \"name,url\"\n    // Split value by , into name, url and add to rss_feeds\n    for arg in std::env::args().skip(1) {\n        let mut split = arg.split(\",\");\n        let name = split.next().unwrap();\n        let url = split.next().unwrap();\n\n        // Ensure that URL contains a valid format, otherwise throw an error\n        if !url.starts_with(\"http://\") && !url.starts_with(\"https://\") {\n            panic!(\"Invalid URL format: {}\", url);\n        }\n\n        rss_feeds.insert(name.to_string(), url.to_string());\n    }\n```\n\n\nTest the error handling with removing a `:` in one of the URL strings. Add\nthe `RUST_BACKTRACE=full` environment variable to get more verbose output\nwhen the `panic()` call happens.\n\n\n```\n\nRUST_BACKTRACE=full cargo run -- \"GitLab\nBlog,https://about.gitlab.com/atom.xml\" \"CNCF,https//www.cncf.io/feed/\"\n\n```\n\n\n![VS Code Terminal with wrong URL format, panic error\nbacktrace](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_url_format_error_panic_backtrace.png){:\n.shadow}\n\n\n## Persistence and data storage\n\nThe boring solution for storing the feed data is to dump the parsed body\ninto a new file. 
Instruct Code Suggestions to use a pattern that includes\nthe RSS feed name, and the current ISO date.\n\n\n```rust\n    // Parse XML body with feed_rs parser, input in bytes\n    let parsed_body = feed_rs::parser::parse(body.as_bytes()).unwrap();\n\n    // Print the result\n    println!(\"{:#?}\", parsed_body);\n\n    // Dump the parsed body to a file, as name-current-iso-date.xml\n    let now = chrono::offset::Local::now();\n    let filename = format!(\"{}-{}.xml\", name, now.format(\"%Y-%m-%d\"));\n    let mut file = std::fs::File::create(filename).unwrap();\n    file.write_all(body.as_bytes()).unwrap();\n```\n\n\nA possible suggestion will include using the [chrono\ncrate](https://crates.io/crates/chrono). Add it using `cargo add chrono` and\nthen invoke `cargo build` and `cargo run` again.\n\n\nThe files are written into the same directory where `cargo run` was\nexecuted. If you are executing the binary directly in the `target/debug/`\ndirectory, all files will be dumped there.\n\n\n![VS Code with CNCF RSS feed content file, saved on\ndisk](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_cncf_rss_feed_saved_on_disk.png)\n\n\n## Optimization\n\nThe entries in the `rss_feeds` variable are executed sequentially. Imagine\nhaving a list of 100+ URLs configured - this could take a long time to fetch\nand process. What if we could execute multiple fetch requests in parallel?\n\n\n### Asynchronous execution\n\nRust provides [threads](https://doc.rust-lang.org/book/ch16-01-threads.html)\nfor asynchronous execution.\n\n\nThe simplest solution will be spawning a thread for each RSS feed URL. We\nwill discuss optimization strategies later. 
Before you continue with\nparallel execution, measure the sequential code execution time by preceding\nthe `cargo run` command with `time`.\n\n\n```\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\"\n\"CNCF,https://www.cncf.io/feed/\"\n\n\n0.21s user 0.08s system 10% cpu 2.898 total\n\n```\n\n\nNote that this exercise could require more manual code work. It is\nrecommended to persist the sequential working state in a new Git commit and\nbranch `sequential-exec`, to better compare the impact of parallel\nexecution.\n\n\n```shell\n\ngit commit -avm \"Sequential execution working\"\n\ngit checkout -b sequential-exec\n\ngit push -u origin sequential-exec\n\n\ngit checkout main\n\n```\n\n\n### Spawning threads\n\nOpen `src/feed_reader.rs` and refactor the `get_feeds()` function. Start\nwith a Git commit for the current state, and then delete the contents of the\nfunction scope. Add the following code comments with instructions for Code\nSuggestions:\n\n\n1. `// Store threads in vector`: Store thread handles in a vector, so we can\nwait for them to finish at the end of the function call.\n\n2. `// Loop over rss_feeds and spawn threads`: Create boilerplate code for\niterating over all RSS feeds, and spawn a new thread.\n\n\nAdd the following `use` statements to work with the `thread` and `time`\nmodules.\n\n\n```rust\n    use std::thread;\n    use std::time::Duration;\n```\n\n\nContinue writing the code, and close the for loop. 
Code Suggestions will\nautomatically propose adding the thread handle in the `threads` vector\nvariable, and offer to join the threads at the end of the function.\n\n\n```rust\n    pub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n\n        // Store threads in vector\n        let mut threads: Vec\u003Cthread::JoinHandle\u003C()>> = Vec::new();\n\n        // Loop over rss_feeds and spawn threads\n        for (name, url) in rss_feeds {\n            let thread_name = name.clone();\n            let thread_url = url.clone();\n            let thread = thread::spawn(move || {\n\n            });\n            threads.push(thread);\n        }\n\n        // Join threads\n        for thread in threads {\n            thread.join().unwrap();\n        }\n    }\n```\n\n\nAdd the `thread` crate, build and run the code again.\n\n\n```shell\n\ncargo add thread\n\n\ncargo build\n\n\ncargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\"\n\"CNCF,https://www.cncf.io/feed/\"\n\n```\n\n\nAt this stage, no data is processed or printed. Before we continue re-adding\nthe functionality, let us learn about the newly introduced keywords here.\n\n\n### Function scopes, threads, and closures\n\nThe suggested code brings new keywords and design patterns to learn. The\nthread handle is of the type `thread::JoinHandle`, indicating that we can\nuse it to wait for the threads to finish\n([join()](https://doc.rust-lang.org/book/ch16-01-threads.html#waiting-for-all-threads-to-finish-using-join-handles)).\n\n\n`thread::spawn()` spawns a new thread, where we can pass a function object.\nIn this case, a\n[closure](https://doc.rust-lang.org/book/ch13-01-closures.html) expression\nis passed as anonymous function. Closure inputs are passed using the `||`\nsyntax. You will recognize the [`move`\nClosure](https://doc.rust-lang.org/book/ch16-01-threads.html#using-move-closures-with-threads),\nwhich moves the function scoped variables into the thread scope. 
This avoids\nmanually specifying which variables need to be passed into the new\nfunction/closure scope.\n\n\nThere is a limitation though: `rss_feeds` is a reference `&`, passed as\nparameter by the `get_feeds()` function caller. The variable is only valid\nin the function scope. Use the following code snippet to provoke this error:\n\n\n```rust\n\npub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n\n    // Store threads in vector\n    let mut threads: Vec\u003Cthread::JoinHandle\u003C()>> = Vec::new();\n\n    // Loop over rss_feeds and spawn threads\n    for (key, value) in rss_feeds {\n        let thread = thread::spawn(move || {\n            println!(\"{}\", key);\n        });\n    }\n}\n\n```\n\n\n![VS Code Terminal, variable scope error with references and thread move\nclosure](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_cargo_build_error_function_threads_variable_scopes.png){:\n.shadow}\n\n\nAlthough the `key` variable was created in the function scope, it references\nthe `rss_feeds` variable, and therefore, it cannot be moved into the thread\nscope. 
Any values accessed from the function parameter `rss_feeds` hash map\nwill require a local copy with `clone()`.\n\n\n![VS Code Terminal, thread spawn with\nclone](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_thread_spawn_clone.png){:\n.shadow}\n\n\n```rust\n\npub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n\n    // Store threads in vector\n    let mut threads: Vec\u003Cthread::JoinHandle\u003C()>> = Vec::new();\n\n    // Loop over rss_feeds and spawn threads\n    for (name, url) in rss_feeds {\n        let thread_name = name.clone();\n        let thread_url = url.clone();\n        let thread = thread::spawn(move || {\n            // Use thread_name and thread_url as values, see next chapter for instructions.\n```\n\n\n## Parse feed XML into object types\n\nThe next step is to repeat the RSS feed parsing steps in the thread closure.\nAdd the following code comments with instructions for Code Suggestions:\n\n\n1. `// Parse XML body with feed_rs parser, input in bytes` to tell Code\nSuggestions that we want to fetch the RSS feed URL content, and parse it\nwith the `feed_rs` crate functions.\n\n2. 
`// Check feed_type attribute feed_rs::model::FeedType::RSS2 or Atom and\nprint its name`: Extract the feed type by comparing the `feed_type`\nattribute with the\n[`feed_rs::model::FeedType`](https://docs.rs/feed-rs/latest/feed_rs/model/enum.FeedType.html).\nThis needs more direct instructions for Code Suggestions telling it about\nthe exact Enum values to match against.\n\n\n![Instruct Code Suggestions to match against specific feed\ntypes](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_feed_rs_type_condition.png){:\n.shadow}\n\n\n```rust\n            // Parse XML body with feed_rs parser, input in bytes\n            let body = reqwest::blocking::get(thread_url).unwrap().bytes().unwrap();\n            let feed = feed_rs::parser::parse(body.as_ref()).unwrap();\n\n            // Check feed_type attribute feed_rs::model::FeedType::RSS2 or Atom and print its name\n            if feed.feed_type == feed_rs::model::FeedType::RSS2 {\n                println!(\"{} is an RSS2 feed\", thread_name);\n            } else if feed.feed_type == feed_rs::model::FeedType::Atom {\n                println!(\"{} is an Atom feed\", thread_name);\n            }\n```\n\n\nBuild and run the program again, and verify its output.\n\n\n```\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\"\n\"CNCF,https://www.cncf.io/feed/\"\n\n\nCNCF is an RSS2 feed\n\nTechCrunch is an RSS2 feed\n\nGitLab Blog is an Atom feed\n\nHacker News is an RSS2 feed\n\n```\n\n\nLet us verify this output by opening the feed URLs in the browser, or\ninspecting the previously downloaded files.\n\n\nHacker News supports RSS version 2.0, with\n`channel(title,link,description,item(title,link,pubDate,comments))`.\nTechCrunch and the CNCF blog follow a similar structure.\n\n```xml\n\n\u003Crss version=\"2.0\">\u003Cchannel>\u003Ctitle>Hacker\nNews\u003C/title>\u003Clink>https://news.ycombinator.com/\u003C/link>\u003Cdescription>Links 
for\nthe intellectually curious, ranked by\nreaders.\u003C/description>\u003Citem>\u003Ctitle>Writing a debugger from scratch:\nBreakpoints\u003C/title>\u003Clink>https://www.timdbg.com/posts/writing-a-debugger-from-scratch-part-5/\u003C/link>\u003CpubDate>Wed,\n27 Sep 2023 06:31:25\n+0000\u003C/pubDate>\u003Ccomments>https://news.ycombinator.com/item?id=37670938\u003C/comments>\u003Cdescription>\u003C![CDATA[\u003Ca\nhref=\"https://news.ycombinator.com/item?id=37670938\">Comments\u003C/a>]]>\u003C/description>\u003C/item>\u003Citem>\n\n```\n\n\nThe GitLab blog uses the\n[Atom](https://datatracker.ietf.org/doc/html/rfc4287) feed format similar to\nRSS, but still requires different parsing logic.\n\n```xml\n\n\u003C?xml version='1.0' encoding='utf-8' ?>\n\n\u003Cfeed xmlns='http://www.w3.org/2005/Atom'>\n\n\u003C!-- / Get release posts -->\n\n\u003C!-- / Get blog posts -->\n\n\u003Ctitle>GitLab\u003C/title>\n\n\u003Cid>https://about.gitlab.com/blog\u003C/id>\n\n\u003Clink href='https://about.gitlab.com/blog/' />\n\n\u003Cupdated>2023-09-26T00:00:00+00:00\u003C/updated>\n\n\u003Cauthor>\n\n\u003Cname>The GitLab Team\u003C/name>\n\n\u003C/author>\n\n\u003Centry>\n\n\u003Ctitle>Atlassian Server ending: Goodbye disjointed toolchain, hello\nDevSecOps platform\u003C/title>\n\n\u003Clink\nhref='https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform/'\nrel='alternate' />\n\n\u003Cid>https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform/\u003C/id>\n\n\u003Cpublished>2023-09-26T00:00:00+00:00\u003C/published>\n\n\u003Cupdated>2023-09-26T00:00:00+00:00\u003C/updated>\n\n\u003Cauthor>\n\n\u003Cname>Dave Steer, Justin Farris\u003C/name>\n\n\u003C/author>\n\n```\n\n\n### Map generic feed data types\n\nUsing\n[`roxmltree::Document::parse`](https://docs.rs/roxmltree/latest/roxmltree/struct.Document.html)\nwould require us to understand the XML node tree and its specific tag 
names.\nFortunately,\n[feed_rs::model::Feed](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Feed.html)\nprovides a combined model for RSS and Atom feeds, therefore let us continue\nusing the `feed_rs` crate.\n\n\n1. Atom: Feed->Feed, Entry->Entry\n\n2. RSS: Channel->Feed, Item->Entry\n\n\nIn addition to the mapping above, we need to extract the required\nattributes, and map their data types. It is helpful to open the\n[feed_rs::model\ndocumentation](https://docs.rs/feed-rs/latest/feed_rs/model/index.html) to\nunderstand the structs and their fields and implementations. Otherwise, some\nsuggestions would result in type conversion errors and compilation failures,\nthat are specific to the `feed_rs` implementation.\n\n\nA [`Feed`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Feed.html)\nstruct provides the `title`, type `Option\u003CText>` (either a value is set, or\nnothing). An\n[`Entry`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Entry.html)\nstruct provides:\n\n\n1. `title`: `Option\u003CText>`with\n[`Text`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Text.html) and\nthe `content` field as `String`.\n\n2. `updated`: `Option\u003CDateTime\u003CUtc>>` with\n[`DateTime`](https://docs.rs/chrono/latest/chrono/struct.DateTime.html) with\nthe [`format()`\nmethod](https://docs.rs/chrono/latest/chrono/struct.DateTime.html#method.format).\n\n3. `summary`: `Option\u003CText>`\n[`Text`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Text.html) and\nthe `content` field as `String`.\n\n4. `links`: `Vec\u003CLink>`, vector with\n[`Link`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Link.html)\nitems. 
The `href` attribute provides the raw URL string.\n\n\nUse this knowledge to extract the required data from the feed entries.\nReminder that all `Option` types need to call `unwrap()`, which requires\nmore raw instructions for Code Suggestions.\n\n\n```rust\n                // https://docs.rs/feed-rs/latest/feed_rs/model/struct.Feed.html\n                // https://docs.rs/feed-rs/latest/feed_rs/model/struct.Entry.html\n                // Loop over all entries, and print\n                // title.unwrap().content\n                // published.unwrap().format\n                // summary.unwrap().content\n                // links href as joined string\n                for entry in feed.entries {\n                    println!(\"Title: {}\", entry.title.unwrap().content);\n                    println!(\"Published: {}\", entry.published.unwrap().format(\"%Y-%m-%d %H:%M:%S\"));\n                    println!(\"Summary: {}\", entry.summary.unwrap().content);\n                    println!(\"Links: {:?}\", entry.links.iter().map(|link| link.href.clone()).collect::\u003CVec\u003CString>>().join(\", \"));\n                    println!();\n                }\n```\n\n\n![Code suggestions to print feed entry types, with specific\nrequirements](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_print_feed_entries_fields_with_rust_type_specifics.png){:\n.shadow}\n\n\n### Error handling with Option unwrap()\n\nContinue iterating on the multi-line instructions after building and running\nthe program again. Spoiler: `unwrap()` will call the `panic!` macro and\ncrash the program when it encounters empty values. 
This can happen if a\nfield like `summary` is not set in the feed data.\n\n\n```shell\n\nGitLab Blog is an Atom feed\n\nTitle: How the Colmena project uses GitLab to support citizen journalists\n\nPublished: 2023-09-27 00:00:00\n\nthread '\u003Cunnamed>' panicked at 'called `Option::unwrap()` on a `None`\nvalue', src/feed_reader.rs:40:59\n\n```\n\n\nA potential solution is to use\n[`std::Option::unwrap_or_else`](https://doc.rust-lang.org/std/option/enum.Option.html#method.unwrap_or_else)\nand set an empty string as default value. The syntax requires a closure that\nreturns an empty `Text` struct instantiation.\n\n\nSolving the problem required many attempts to find the correct\ninitialization, passing just an empty string did not work with the custom\ntypes. I will show you all my endeavors, including the research paths.\n\n\n```rust\n\n// Problem: The `summary` attribute is not always initialized. unwrap() will\npanic! then.\n\n// Requires use mime; and use feed_rs::model::Text;\n\n/*\n\n// 1st attempt: Use unwrap() to extract Text from Option\u003CText> type.\n\nprintln!(\"Summary: {}\", entry.summary.unwrap().content);\n\n// 2nd attempt. Learned about unwrap_or_else, passing an empty string.\n\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| \"\").content);\n\n// 3rd attempt. summary is of the Text type, pass a new struct\ninstantiation.\n\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{}).content);\n\n// 4th attempt. Struct instantiation requires 3 field values.\n\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{\"\", \"\",\n\"\"}).content);\n\n// 5th attempt. Struct instantiation with public fields requires key: value\nsyntax\n\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type:\n\"\", src: \"\", content: \"\"}).content);\n\n// 6th attempt. 
Reviewed expected Text types in\nhttps://docs.rs/feed-rs/latest/feed_rs/model/struct.Text.html and created\nMime and String objects\n\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type:\nmime::TEXT_PLAIN, src: String::new(), content: String::new()}).content);\n\n// 7th attempt: String and Option\u003CString> cannot be casted automagically.\nCompiler suggested using `Option::Some()`.\n\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type:\nmime::TEXT_PLAIN, src: Option::Some(), content: String::new()}).content);\n\n*/\n\n\n// xth attempt: Solution. Option::Some() requires a new String object.\n\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type:\nmime::TEXT_PLAIN, src: Option::Some(String::new()), content:\nString::new()}).content);\n\n```\n\n\nThis approach did not feel satisfying, since the code line is complicated to\nread, and required manual work without help from Code Suggestions. Taking a\nstep back, I reviewed what brought me there - if `Option` is `none`,\n`unwrap()` will throw an error. Maybe there is an easier way to handle this?\nI asked Code Suggestions in a new comment:\n\n\n```\n                // xth attempt: Solution. 
Option::Some() requires a new String object.\n                println!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type: mime::TEXT_PLAIN, src: Option::Some(String::new()), content: String::new()}).content);\n\n                // Alternatively, use Option.is_none()\n```\n\n\n![Code suggestions asked for alternative with\nOptions.is_none](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_after_complex_unwrap_or_else_ask_for_alternative_option.png){:\n.shadow}\n\n\nIncreased readability, fewer CPU cycles wasted on `unwrap()`, and a great\nlearning curve from solving a complex problem to using a boring solution.\nWin-win.\n\n\nBefore we forget: Re-add storing the XML data on disk to complete the reader\napp again.\n\n\n```rust\n                // Dump the parsed body to a file, as name-current-iso-date.xml\n                let file_name = format!(\"{}-{}.xml\", thread_name, chrono::Local::now().format(\"%Y-%m-%d-%H-%M-%S\"));\n                let mut file = std::fs::File::create(file_name).unwrap();\n                file.write_all(body.as_ref()).unwrap();\n```\n\n\nBuild and run the program to verify the output.\n\n\n```shell\n\ncargo build\n\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\"\n\"CNCF,https://www.cncf.io/feed/\"\n\n```\n\n\n![VS Code Terminal, cargo run with formatted feed entries\noutput](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_cargo_run_formatted_output_final.png)\n\n\n## Benchmarks\n\n\n### Sequential vs. Parallel execution benchmark\n\nCompare the execution time benchmarks by creating five samples each.\n\n\n1. Sequential execution. [Example source code\nMR](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader/-/merge_requests/1)\n\n2. Parallel execution. 
[Example source code\nMR](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader/-/merge_requests/3)\n\n\n```shell\n\n# Sequential\n\ngit checkout sequential-exec\n\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\"\n\"CNCF,https://www.cncf.io/feed/\"\n\n\n0.21s user 0.08s system 10% cpu 2.898 total\n\n0.21s user 0.08s system 11% cpu 2.585 total\n\n0.21s user 0.09s system 10% cpu 2.946 total\n\n0.19s user 0.08s system 10% cpu 2.714 total\n\n0.20s user 0.10s system 10% cpu 2.808 total\n\n```\n\n\n```shell\n\n# Parallel\n\ngit checkout parallel-exec\n\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\"\n\"CNCF,https://www.cncf.io/feed/\"\n\n\n0.19s user 0.08s system 17% cpu 1.515 total\n\n0.18s user 0.08s system 16% cpu 1.561 total\n\n0.18s user 0.07s system 17% cpu 1.414 total\n\n0.19s user 0.08s system 18% cpu 1.447 total\n\n0.17s user 0.08s system 16% cpu 1.453 total\n\n```\n\n\nThe CPU usage increased for parallel execution of four RSS feed threads, but\nit nearly halved the total time compared to sequential execution. With that\nin mind, we can continue learning Rust and optimize the code and\nfunctionality.\n\n\nNote that we are running the debug build through Cargo, and not the\noptimized released builds yet. There are caveats with parallel execution\nthough: Some HTTP endpoints put rate limits in place, where parallelism\ncould hit these thresholds easier.\n\n\nThe system executing multiple threads in parallel might get overloaded too –\nthreads require context switching in the Kernel, assigning resources to each\nthread. While one thread gets computing resources, other threads are put to\nsleep. If there are too many threads spawned, this might slow down the\nsystem, rather than speeding up the operations. 
Solutions include design\npatterns such as [work\nqueues](https://docs.rs/work-queue/latest/work_queue/), where the caller\nadds a task into a queue, and a defined number of worker threads pick up the\ntasks for asynchronous execution.\n\n\nRust also provides data synchronisation between threads, so-called\n[channels](https://doc.rust-lang.org/rust-by-example/std_misc/channels.html).\nTo ensure concurrent data access,\n[mutexes](https://doc.rust-lang.org/std/sync/struct.Mutex.html) are\navailable to provide safe locks.\n\n\n### CI/CD with Rust caching\n\nAdd the following CI/CD configuration into the `.gitlab-ci.yml` file. The\n`run-latest` job calls `cargo run` with RSS feed URL examples, and measures\nthe execution time continuously.\n\n\n```\n\nstages:\n  - build\n  - test\n  - run\n\ndefault:\n  image: rust:latest\n  cache:\n    key: ${CI_COMMIT_REF_SLUG}\n    paths:\n      - .cargo/bin\n      - .cargo/registry/index\n      - .cargo/registry/cache\n      - target/debug/deps\n      - target/debug/build\n    policy: pull-push\n\n# Cargo data needs to be in the project directory for being cached.\n\nvariables:\n  CARGO_HOME: ${CI_PROJECT_DIR}/.cargo\n\nbuild-latest:\n  stage: build\n  script:\n    - cargo build --verbose\n\ntest-latest:\n  stage: build\n  script:\n    - cargo test --verbose\n\nrun-latest:\n  stage: run\n  script:\n    - time cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n```\n\n\n![GitLab CI/CD pipelines for Rust, cargo run\noutput](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/gitlab_cicd_pipeline_rust_cargo_run_output.png){:\n.shadow}\n\n\n## What is next\n\nThis blog post was challenging to create, with both learning advanced Rust\nprogramming techniques myself, and finding a good learning curve with Code\nSuggestions. 
The latter greatly helps with quickly generating code, not just\nboilerplate snippets – it understands the local context, and better\nunderstands the purpose and scope of the algorithm, the more code you write.\nAfter reading this blog post, you know of a few challenges and turnarounds.\nThe example solution code for the reader app is available in [the\nlearn-rust-ai-app-reader\nproject](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader).\n\n\nParsing RSS feeds is challenging since it involves data structures, with\nexternal HTTP requests and parallel optimizations. As an experienced Rust\nuser, you might have wondered: `Why not use the std::rss crate?` -- It is\noptimized for advanced asynchronous execution, and does not allow to show\nand explain the different Rust functionalities, explained in this blog post.\nAs an async exercise, try to rewrite the code using the [`rss`\ncrate](https://docs.rs/rss/latest/rss/).\n\n\n### Async learning exercises\n\nThe lessons learned in this blog post also lay the foundation for future\nexploration with persistent storage and presenting the data. Here are a few\nideas where you can continue learning Rust and optimize the reader app:\n\n\n1. Data storage: Use a database like sqlite, and RSS feed update tracking.\n\n2. Notifications: Spawn child processes to trigger notifications into\nTelegram, etc.\n\n3. Functionality: Extend the reader types to REST APIs\n\n4. Configuration: Add support for configuration files for RSS feeds, APIs,\netc.\n\n5. Efficiency: Add support for filters, and subscribed tags.\n\n6. Deployments: Use a webserver, collect Prometheus metrics, and deploy to\nKubernetes.\n\n\nIn a future blog post, we will discuss some of these ideas, and how to\nimplement them. 
Dive into existing RSS feed implementations, and learn how\nyou can refactor the existing code into leveraging more Rust libraries\n(`crates`).\n\n\n### Share your feedback\n\nWhen you use [GitLab Duo](/gitlab-duo/) Code Suggestions, please [share your\nthoughts in the feedback\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/405152).\n",[716,2402,737,759,9],"careers",{"slug":2404,"featured":6,"template":693},"learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions","content:en-us:blog:learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions.yml","Learn Advanced Rust Programming With A Little Help From Ai Code Suggestions","en-us/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions.yml","en-us/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions",{"_path":2410,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2411,"content":2416,"config":2420,"_id":2422,"_type":14,"title":2423,"_source":16,"_file":2424,"_stem":2425,"_extension":19},"/en-us/blog/learning-python-with-a-little-help-from-ai-code-suggestions",{"title":2412,"description":2413,"ogTitle":2412,"ogDescription":2413,"noIndex":6,"ogImage":2176,"ogUrl":2414,"ogSiteName":706,"ogType":707,"canonicalUrls":2414,"schema":2415},"Learning Python with a little help from AI","Use this guided tutorial, along with GitLab Duo Code Suggestions, to learn a new programming language.","https://about.gitlab.com/blog/learning-python-with-a-little-help-from-ai-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learning Python with a little help from AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-11-09\",\n      }",{"title":2412,"description":2413,"authors":2417,"heroImage":2176,"date":1741,"body":2418,"category":10,"tags":2419},[1262],"Learning a new 
programming language can help broaden your software\ndevelopment expertise, open career opportunities, or create fun challenges.\nHowever, it can be difficult to decide on one specific approach to learning\na new language. Artificial intelligence (AI) can help. In this tutorial,\nyou'll learn how to leverage AI-powered GitLab Duo Code Suggestions for a\nguided experience in learning the Python programming language with a\npractical hands-on example.\n\n\n- [Preparations](#preparations)\n  - [VS Code](#vs-code)\n  - [Code Suggestions](#code-suggestions)\n- [Learning a new programming language:\nPython](#learning-a-new-programming-language-python)\n    - [Development environment for Python](#development-environment-for-python)\n    - [Hello, World](#hello-world)\n- [Start learning Python with a practical\nexample](#start-learning-python-with-a-practical-example)\n    - [Define variables and print them](#define-variables-and-print-them)\n    - [Explore variable types](#explore-variable-types)\n- [File I/O: Read and print a log file](#file-io-read-and-print-a-log-file)\n\n- [Flow control](#flow-control)\n    - [Loops and lists to collect files](#loops-and-lists-to-collect-files)\n    - [Conditionally collect files](#conditionally-collect-files)\n- [Functions](#functions)\n    - [Start with a simple log format](#start-with-a-simple-log-format)\n    - [String and data structure operations](#string-and-data-structure-operations)\n    - [Parse log files using regular expressions](#parse-log-files-using-regular-expressions)\n    - [Advanced log format: auth.log](#advanced-log-format-authlog)\n    - [Parsing more types: Structured logging](#parsing-more-types-structured-logging)\n- [Printing results and formatting](#printing-results-and-formatting)\n\n- [Dependency management and continuous\nverification](#dependency-management-and-continuous-verification)\n    - [Pip and pyenv: Bringing structure into Python](#pip-and-pyenv-bringing-structure-into-python)\n    - 
[Automation: Configure CI/CD pipeline for Python](#automation-configure-cicd-pipeline-for-python)\n- [What is next](#what-is-next)\n    - [Async learning exercises](#async-learning-exercises)\n    - [Share your feedback](#share-your-feedback)\n\n## Preparations \n\n\nChoose your [preferred and supported\nIDE](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-in-other-ides-and-editors),\nand follow the documentation to enable Code Suggestions for [GitLab.com\nSaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas)\nor [GitLab self-managed\ninstances](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\n\nProgramming languages can require installing the language interpreter\ncommand-line tools or compilers that generate binaries from source code to\nbuild and run the application.\n\n\n**Tip:** You can also use [GitLab Remote Development\nworkspaces](/blog/quick-start-guide-for-gitlab-workspaces/) to\ncreate your own cloud development environments, instead of local development\nenvironments. This blog post focuses on using VS Code and the GitLab Web\nIDE. \n\n\n### VS Code\n\n\n[Install VS Code](https://code.visualstudio.com/download) on your client,\nand open it. Navigate to the `Extensions` menu and search for `gitlab\nworkflow`. Install the [GitLab Workflow extension for VS\nCode](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow).\nVS Code will also detect the programming languages, and offer to install\nadditional plugins for syntax highlighting and development experience. For\nexample, install the [Python\nextension](https://marketplace.visualstudio.com/items?itemName=ms-python.python).\n\n\n### Code Suggestions\n\n\nFamiliarize yourself with suggestions before actually verifying the\nsuggestions. 
GitLab Duo Code Suggestions are provided as you type, so you do\nnot need use specific keyboard shortcuts. To accept a code suggestion, press\nthe `tab` key. Also note that writing new code works more reliably than\nrefactoring existing code. AI is non-deterministic, which means that the\nsame suggestion may not be repeated after deleting the code suggestion.\nWhile Code Suggestions is in Beta, we are working on improving the accuracy\nof generated content overall. Please review the [known\nlimitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#known-limitations),\nas this could affect your learning experience.\n\n\n**Tip:** The latest release of Code Suggestions supports multiline\ninstructions. You can refine the specifications to your needs to get better\nsuggestions. We will practice this method throughout the blog post.\n\n\n## Learning a new programming language: Python  \n\n\nNow, let's dig into learning Python, which is one of the [supported\nlanguages in Code\nSuggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#supported-languages). \n\n\nBefore diving into the source code, make sure to set up your development\nenvironment.\n\n\n### Development environment for Python \n\n\n1) Create a new project `learn-python-ai` in GitLab, and clone the project\ninto your development environment. All code snippets are available in this\n[\"Learn Python with AI\"\nproject](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai).\n\n\n```shell\n\ngit clone https://gitlab.com/NAMESPACE/learn-python-ai.git\n\n\ncd learn-python-ai\n\n\ngit status\n\n```\n\n\n2) Install Python and the build toolchain. Example on macOS using Homebrew:\n\n\n```\n\nbrew install python\n\n```\n\n\n3) Consider adding a `.gitignore` file for Python, for example this\n[.gitignore template for\nPython](https://gitlab.com/gitlab-org/gitlab/-/blob/master/vendor/gitignore/Python.gitignore?ref_type=heads). 
\n\n\nYou are all set to learn Python! \n\n\n### Hello, World\n\n\nStart your learning journey in the [official\ndocumentation](https://www.python.org/about/gettingstarted/), and review the\nlinked resources, for example, the [Python\ntutorial](https://docs.python.org/3/tutorial/index.html). The\n[library](https://docs.python.org/3/library/index.html) and [language\nreference](https://docs.python.org/3/reference/index.html) documentation can\nbe helpful, too. \n\n\n**Tip:** When I touched base with Python in 2005, I did not have many use\ncases except as a framework to test Windows 2000 drivers. Later, in 2016, I\nrefreshed my knowledge with the book \"Head First Python, 2nd Edition,\"\nproviding great practical examples for the best learning experience – two\nweeks later, I could explain the differences between Python 2 and 3. You do\nnot need to worry about Python 2 – it has been deprecated some years ago,\nand we will focus only on Python 3 in this blog post. In August 2023, \"[Head\nFirst Python, 3rd\nEdition](https://www.oreilly.com/library/view/head-first-python/9781492051282/)\"\nwas published. The book provides a great learning resource, along with the\nexercises shared in this blog post. \n\n\nCreate a new file `hello.py` in the root directory of the project and start\nwith a comment saying `# Hello world`. Review and accept the suggestion by\npressing the `tab` key and save the file (keyboard shortcut: cmd s). \n\n\n```\n\n# Hello world\n\n```\n\n\nCommit the change to the Git repository. In VS Code, use the keyboard\nshortcut `ctrl shift G`, add a commit message, and hit `cmd enter` to\nsubmit. \n\n\nUse the command palette (`cmd shift p`) and search for `create terminal` to\nopen a new terminal. Run the code with the Python interpreter. 
On macOS, the\nbinary from Homebrew is called `python3`, other operating systems and\ndistributions might use `python` without the version.\n\n\n```shell\n\npython3 hello.py\n\n```\n\n\n![Hello World, hello GitLab Duo Code\nSuggestions](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_python_code_sugestions_hello_world.png)\n\n\n**Tip:** Adding code comments in Python starting with the `#` character\nbefore you start writing a function or algorithm will help Code Suggestions\nwith more context to provide better suggestions. In the example above, we\ndid that with `# Hello world`, and will continue doing so in the next\nexercises.\n\n\nAdd `hello.py` to Git, commit all changes and push them to your GitLab\nproject.\n\n\n```shell\n\ngit add hello.py\n\n\ngit commit -avm \"Initialize Python\"\n\n\ngit push\n\n```\n\n\nThe source code for all exercises in this blog post is available in this\n[\"Learn Python with AI\"\nproject](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai).\n\n\n## Start learning Python with a practical example \n\n\nThe learning goal in the following sections involves diving into the\nlanguage datatypes, variables, flow control, and functions. We will also\nlook into file operations, string parsing, and data structure operations for\nprinting the results. The exercises will help build a command-line\napplication that reads different log formats, works with the data, and\nprovides a summary. 
This will be the foundation for future projects that\nfetch logs from REST APIs, and inspire more ideas such as rendering images,\ncreating a web server, or adding Observability metrics.\n\n\n![Parsing log files into structured objects, example result after following\nthe\nexercises](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_parsing_logs_and_pretty_print_results.png)\n\n\nAs an experienced admin, you can put the script into production and use\nreal-world log format exmples. Parsing and analyzing logs in stressful\nproduction incidents can be time-consuming. A local CLI tool is sometimes\nfaster than a log management tool.\n\n\nLet's get started: Create a new file called `log_reader.py` in the directory\nroot, add it to Git, and create a Git commit.\n\n\n### Define variables and print them\n\n\nAs a first step, we need to define the log files location, and the expected\nfile suffix. Therefore, let's create two variables and print them. Actually,\nask Code Suggestions to do that for you by writing only the code comments\nand accepting the suggestions. Sometimes, you need to experiment with\nsuggestions and delete already accepted code blocks. 
Do not worry – the\nquality of the suggestions will improve over time as the model generates\nbetter suggestions with more context.\n\n\n![Define log path and file suffix\nvariables](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_reader_variables_01.png){:\n.shadow}\n\n\n![Print the variables to\nverify](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_reader_variables_02.png){:\n.shadow}\n\n\n```python\n\n# Specify the path and file suffix in variables\n\npath = '/var/log/'\n\nfile_suffix = '.log'\n\n\n# Print the variables \n\n\nprint(path)\n\nprint(file_suffix)\n\n```\n\n\nNavigate into the VS Code terminal and run the Python script:\n\n\n```shell\n\npython3 log_reader.py\n\n```\n\n\n![VS Code terminal, printing the\nvariables](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_print_variables.png)\n\n\nPython supports many different types in the [standard\nlibrary](https://docs.python.org/3/library/index.html). Most common types\nare: Numeric (int, float, complex), Boolean (True, False), and String (str).\nData structures include support for lists, tuples, and dictionaries. \n\n\n### Explore variable types \n\n\nTo practice different variable types, let's define a limit of log files to\nread as a variable with the `integer` type.\n\n\n![Log file\nvariable](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_reader_variables_03.png){:\n.shadow}\n\n\n```python\n\n# Define log file limit variable \n\nlog_file_limit = 1024 \n\n```\n\n\nCreate a Boolean variable that forces to read all files in the directory, no\nmatter the log file suffix. 
\n\n\n```python\n\n# Define boolean variable whether to read all files recursively\n\nread_all_files_recursively = True\n\n```\n\n\n## File I/O: Read and print a log file\n\n\nCreate a directory called `log-data` in your project tree. You can copy all\nfile examples from the [log-data directory in the example\nproject](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai/-/tree/main/log-data?ref_type=heads).  \n\n\nCreate a new file `sample.log` with the following content, or any other two\nlines that provide a different message at the end.\n\n\n```\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: dpkg-db-backup.service: Deactivated\nsuccessfully.\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: Finished Daily dpkg database backup\nservice.\n\n```\n\n\nInstruct Code Suggestions to read the file `log-data/sample.log` and print\nthe content. \n\n\n![Code Suggestions: Read log file and print\nit](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_read_log_file_and_print.png){:\n.shadow}\n\n\n```python\n\n# Read the file in log-data/sample.log and print its content\n\nwith open('log-data/sample.log', 'r') as f:\n    print(f.read())\n```\n\n\n**Tip:** You will notice the indent here. The `with open() as f:` statement\nopens a new scope where `f` is available as stream. This flow requires\nindenting )`tab`) the code block, and perform actions in this scope, calling\n`f.read()` to read the file contents, and passing the immediate value as\nparameter into the `print()` function.\n\n\nNavigate into the terminal, and run the script again with `python3\nlog_reader.py`. 
You will see the file content shown in the VS Code editor,\nalso printed into the terminal.\n\n\n![VS Code terminal: Read log file, and print\nit](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_print_logfile_show_file_sample.png)\n\n\n## Flow control \n\n\nReading one log file is not enough – we want to analyze all files in a given\ndirectory recursively. For the next exercise, we instruct Code Suggestions\nto create an index of all files. \n\n\nPrepare the `log-data` directory with more example files from the [log-data\ndirectory in the example\nproject](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai/-/tree/main/log-data?ref_type=heads).\nThe directory tree should look as follows:\n\n\n```shell\n\ntree log-data                                                             ─╯\n\nlog-data\n\n├── sample.log\n\n└── var\n    └── log\n        ├── auth.log\n        ├── syslog.log\n        └── syslog_structured.log\n\n3 directories, 4 files\n\n```\n\n\n### Loops and lists to collect files \n\n\nModify the `path` variable to use the value `log-data/`. \n\n\n```python\n\n# Specify the path and file suffix in variables\n\npath = 'log-data/'\n\nfile_suffix = '.log'\n\n```\n\n\nTell Code Suggestions to read all file paths in the directory into a list.\nAfter the collection loop, print the list of file paths. 
\n\n\n```python\n\n# Read all file paths in the directory into a list\n\n\n# Print the list of log file paths\n\n```\n\n\n![Code Suggestion, collect file\npaths](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_collect_files.png){:\n.shadow}\n\n\nAn example of a suggestion could look like this:\n\n\n```python\n\n# Read all file paths in the directory into a list\n\nimport os\n\n\n# Read all file paths in the directory into a list\n\nlog_files = []\n\nfor root, directories, files in os.walk(path):\n    for file in files:\n        if file.endswith(file_suffix):\n            log_files.append(os.path.join(root, file))\n\n# Print the list of log file paths\n\n\nprint(log_files)\n\n```\n\n\nLet's analyze what happens here: The `import os` statement is required to\nbring the `os` library into the current scope, and we are able to call\n`os.walk()` later. The `log_files`\n[list](https://docs.python.org/3/library/stdtypes.html#sequence-types-list-tuple-range)\nis initialized as an empty list using empty brackets. `os.walk()` expects a\npath as directory tree to start searching for files. The function returns a\ngenerator object, which will walk the directory tree recursively. The\nreturned tuples include the collected files, which we want to access in a\nloop iteraor, using the `for` loop notation. When the file ends with the\ndefined suffix, its full path is appended to the list. This is an optimized\nand complex example. Do not worry about understanding everything just yet -\nwe will revisit this later. \n\n\n### Conditionally collect files \n\n\nModify the instructions and add the Boolean flag evaluation for\n`read_all_files_recursively` as an input to the glob() function. 
\n\n\n```python\n\n# Read all file paths in the top level directory into a list\n\n# If read_all_files_recursively is True, read all files recursively\n\n```\n\n\n![Code Suggestions, read files recursively\nconditionally](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_read_files_recursively_conditionally.png){:\n.shadow}\n\n\n```\n\n# Read all file paths in the top level directory \n\nimport os\n\n\n# Read all file paths in the top level directory into a list\n\n# If read_all_files_recursively is True, read all files recursively\n\n\nlog_files = []\n\nfor file in os.listdir(path):\n    if file.endswith(file_suffix):\n        log_files.append(os.path.join(path, file))\n\nif read_all_files_recursively:\n    for root, directories, files in os.walk(path):\n        for file in files:\n            if file.endswith(file_suffix):               \n                log_files.append(os.path.join(root, file))\n\n# Print log_files\n\nprint(log_files)\n\n```\n\n\nThe result is not optimal yet because it always executes the first loop, and\noptionally the second loop. This flow leads to duplicated results when the\nscript is executed.\n\n\n```\n\npython3 log_reader.py\n\n\n['log-data/sample.log', 'log-data/sample.log', 'log-data/var/log/auth.log']\n\n```\n\n\nExperiment with Code Suggestions instructions to get a solution for the\nproblem. There are different approaches you can take: \n\n\n1) A potential solution is to wrap the source code into an if-then-else\nblock, and move the `os.listdir()` loop into the else-block. 
\n\n\n```python\n\nif read_all_files_recursively:\n    for root, directories, files in os.walk(path):\n        for file in files:\n            if file.endswith(file_suffix):               \n                log_files.append(os.path.join(root, file))\nelse:\n    for file in os.listdir(path):\n        if file.endswith(file_suffix):\n            log_files.append(os.path.join(path, file))  \n\n```\n\n\n2) Alternatively, do not use `append()` to always add a new list entry, but\ncheck if the item exists in the list first. \n\n\n```python\n\nfor file in os.listdir(path):\n    if file.endswith(file_suffix):\n        # check if the entry exists in the list already\n        if os.path.isfile(os.path.join(path, file)):\n            log_files.append(os.path.join(path, file))\n\nif read_all_files_recursively:\n    for root, directories, files in os.walk(path):\n        for file in files:\n            if file.endswith(file_suffix):\n                # check if the entry exists in the list already\n                if file not in log_files:\n                    log_files.append(os.path.join(root, file))\n```\n\n\n3) Or, we could eliminate duplicate entries after collecting all items.\nPython allows converting lists into\n[sets](https://docs.python.org/3/library/stdtypes.html#set-types-set-frozenset),\nwhich hold unique entries. After applying `set()`, you can again convert the\nset back into a list. Code Suggestions knows about this possibility, and\nwill help with the comment `# Ensure that only unique file paths are in the\nlist` \n\n\n![Code Suggestions, converting a list to unique\nitems](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_unique_list.png){:\n.shadow}\n\n\n```python\n\n# Ensure that only unique file paths are in the list\n\n\nlog_files = list(set(log_files))\n\n```\n\n\n4) Take a step back and evaluate whether the variable\nread_all_files_recursively makes sense. 
Maybe the default behavior should\njust be reading all files recursively?\n\n\n**Tip for testing different paths in VS Code:** Select the code blocks, and\npress [`cmd /` on\nmacOS](https://code.visualstudio.com/docs/getstarted/keybindings) to comment\nout the code. \n\n\n## Functions \n\n\nLet's create a function called `parse_log_file` that parses a log file, and\nreturns the extracted data. We will define the expected log format and\ncolumns to extract, following the [syslog format\nspecification](https://en.wikipedia.org/wiki/Syslog). There are different\nlog format types and also customized formats by developers that need to be\ntaken into account – exercise for later. \n\n\n### Start with a simple log format \n\n\nInspect a running Linux VM, or use the following example log file example\nfor additional implementation.\n\n\n```\n\nless /var/log/syslog | grep -v docker \n\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: Starting Daily dpkg database backup\nservice...\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: Starting Rotate log files...\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: dpkg-db-backup.service: Deactivated\nsuccessfully.\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: Finished Daily dpkg database backup\nservice.\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: logrotate.service: Deactivated\nsuccessfully.\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: Finished Rotate log files.\n\nOct 17 00:17:01 ebpf-chaos CRON[727495]: (root) CMD (   cd / && run-parts\n--report /etc/cron.hourly)\n\n```\n\n\nWe can create an algorithm to split each log line by whitespaces, and then\njoin the results again. Let's ask Code Suggestions for help. 
\n\n\n```python\n\n# Split log line \"Oct 17 00:00:04 ebpf-chaos systemd[1]: Finished Rotate log\nfiles.\" by whitespaces and save in a list\n\n\nlog_line = \"Oct 17 00:00:04 ebpf-chaos systemd[1]: Finished Rotate log\nfiles.\"\n\nlog_line_split = log_line.split(\" \")\n\nprint(log_line_split)\n\n```\n\n\nRun the script again to verify the result.\n\n\n```shell\n\npython3 log_reader.py\n\n\n['Oct', '17', '00:00:04', 'ebpf-chaos', 'systemd[1]:', 'Finished', 'Rotate',\n'log', 'files.']\n\n```\n\n\nThe first three items are part of the datetime string, followed by the host,\nservice, and remaining log message items. Let's practice string operations\nin Python as the next step. \n\n\n### String and data structure operations\n\n\nLet's ask Code Suggestions for help with learning to join strings, and\nperform list operations.\n\n\n1. Join the first three items with a whitespace again. \n\n2. Keep host and service. \n\n3. Join the remaining variable item count into a string, separated with\nwhitespaces, again. \n\n4. Store the identified column keys, and their respective values in a new\ndata structure:\n[dictionary](https://docs.python.org/3/library/stdtypes.html#mapping-types-dict). 
\n\n\n![Code suggestions for list items with string\noperations](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_list_items_string_join_extract.png){:\n.shadow}\n\n\n```shell \n\npython3 log_reader.py\n\n\n# Array\n\n['Oct', '17', '00:00:04', 'ebpf-chaos', 'systemd[1]:', 'Finished', 'Rotate',\n'log', 'files.']\n\n\n# Dictionary \n\n{'datetime': 'Oct 17 00:00:04', 'host': 'ebpf-chaos', 'service':\n'systemd[1]:', 'message': ' ebpf-chaos systemd[1]: Finished Rotate log\nfiles.'}\n\n```\n\n\nA working suggestion can look like the following:\n\n\n```python\n\n# Initialize results dictionary with empty values for datetime, host,\nservice, message\n\n# Loop over log line split \n\n# Join the first three list items as date string\n\n# Item 4: host \n\n# Item 5: service\n\n# Join the remaining items into a string, separated with whitespaces \n\n# Print the results after the loop \n\n\nresults = {'datetime': '', 'host': '', 'service': '', 'message': ''}\n\n\nfor item in log_line_split:\n\n    if results['datetime'] == '':\n        results['datetime'] = ' '.join(log_line_split[0:3])\n\n    elif results['host'] == '':\n        results['host'] = log_line_split[3]\n\n    elif results['service'] == '':\n        results['service'] = log_line_split[4]\n\n    else:\n        results['message'] += ' ' + item\n\nprint(results)\n\n\n```\n\n\nThe suggested algorithm loops over all log line items, and applies the same\noperation for the first three items. `log_line_split[0:3]` extracts a slice\nof three items into a new list. Calling `join()` on a separator character\nand passing the array as an argument joins the items into a string. The\nalgorithm continues to check for not initialized values for host (Item 4)\nand service (Item 5)and concludes with the remaining list items appended\ninto the message string. 
To be honest, I would have used a slightly\ndifferent algorithm, but it is a great learning curve to see other\nalgorithms, and ways to implement them. Practice with different\ninstructions, and data structures, and continue printing the data sets. \n\n\n**Tip:** If you need to terminate a script early, you can use `sys.exit()`.\nThe remaining code will not be executed. \n\n\n```python\n\nimport sys \n\nsys.exit(1)\n\n```\n\n\nImagine doing these operations for different log formats, and message types\n– it can get complicated and error-prone very quickly. Maybe there is\nanother approach. \n\n\n### Parse log files using regular expressions\n\n\nThere are different syslog format RFCs – [RFC\n3164](https://datatracker.ietf.org/doc/html/rfc3164) is obsolete but still\nfound in the wild as default configuration (matching the pattern above),\nwhile [RFC 5424](https://datatracker.ietf.org/doc/html/rfc5424) is more\nmodern, including datetime with timezone information. Parsing this format\ncan be tricky, so let's ask Code Suggestions for advice. \n\n\nIn some cases, the suggestions include regular expressions. They might not\nmatch immediately, making the code more complex to debug, with trial and\nerrors. A good standalone resource to text and explain regular expressions\nis [regex101.com](https://regex101.com/).  \n\n\n**Tip:** You can skip diving deep into regular expressions using the\nfollowing code snippet as a quick cheat. The next step involves instructing\nCode Suggestions to use these log patterns, and help us extract all valuable\ncolumns. 
\n\n\n```python\n\n# Define the syslog log format regex in a dictionary\n\n# Add entries for RFC3164, RFC5424\n\nregex_log_pattern = {\n    'rfc3164': '([A-Z][a-z][a-z]\\s{1,2}\\d{1,2}\\s\\d{2}[:]\\d{2}[:]\\d{2})\\s([\\w][\\w\\d\\.@-]*)\\s(.*)$',\n    'rfc5424': '(?:(\\d{4}[-]\\d{2}[-]\\d{2}[T]\\d{2}[:]\\d{2}[:]\\d{2}(?:\\.\\d{1,6})?(?:[+-]\\d{2}[:]\\d{2}|Z)?)|-)\\s(?:([\\w][\\w\\d\\.@-]*)|-)\\s(.*)$;'\n}\n\n```\n\n\nWe know what the function should do, and its input parameters – the file\nname, and a log pattern to match. The log lines should be split by this\nregular expression, returning a key-value dictionary for each log line. The\nfunction should return a list of dictionaries. \n\n\n```python\n\n# Create a function that parses a log file\n\n# Input parameter: file path\n\n# Match log line against regex_log_pattern\n\n# Return the results as dictionary list: log line, pattern, extracted\ncolumns\n\n```\n\n\n![Code suggestion based on a multiline comment instruction to get a function\nthat parses a log file based on regex\npatterns](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_format_regex_function_instructions_01.png){:\n.shadow}\n\n\nRemember the indent for opening a new scope? The same applies for functions\nin Python. The `def` identifier requires a function name, and a list of\nparameters, followed by an opening colon. The next lines of code require the\nindent. VS Code will help with live-linting wrong indent, before the script\nexecution fails, or the CI/CD pipelines. \n\n\nContinue with Code Suggestions – it might already know that you want to\nparse all log files, and parse them using the newly created function. 
\n\n\n![Code suggestion to parse all log files, and print the result\nset](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_format_regex_function_instructions_02.png){:\n.shadow}\n\n\nA full working example can look like this: \n\n\n```\n\nimport os\n\n\n# Specify the path and file suffix in variables\n\npath = 'log-data/'\n\nfile_suffix = '.log'\n\n\n# Read all file paths in the directory into a list\n\nlog_files = []\n\nfor root, directories, files in os.walk(path):\n    for file in files:\n        if file.endswith(file_suffix):\n            log_files.append(os.path.join(root, file))\n\n# Define the syslog log format regex in a dictionary\n\n# Add entries for RFC3164, RFC5424\n\nregex_log_pattern = {\n    'rfc3164': '([A-Z][a-z][a-z]\\s{1,2}\\d{1,2}\\s\\d{2}[:]\\d{2}[:]\\d{2})\\s([\\w][\\w\\d\\.@-]*)\\s(.*)$',\n    'rfc5424': '(?:(\\d{4}[-]\\d{2}[-]\\d{2}[T]\\d{2}[:]\\d{2}[:]\\d{2}(?:\\.\\d{1,6})?(?:[+-]\\d{2}[:]\\d{2}|Z)?)|-)\\s(?:([\\w][\\w\\d\\.@-]*)|-)\\s(.*)$;'\n}\n\n\n# Create a function that parses a log file\n\n# Input parameter: file path\n\n# Match log line against regex_log_pattern\n\n# Return the results as dictionary list: log line, pattern name, extracted\ncolumns\n\nimport re\n\n\ndef parse_log_file(file_path):\n    # Read the log file\n    with open(file_path, 'r') as f:\n        log_lines = f.readlines()\n\n    # Create a list to store the results\n    results = []\n\n    # Iterate over the log lines\n    for log_line in log_lines:\n        # Match the log line against the regex pattern\n        for pattern_name, pattern in regex_log_pattern.items():\n            match = re.match(pattern, log_line)\n\n            # If the log line matches the pattern, add the results to the list\n            if match:\n                extracted_columns = match.groups()\n                results.append({\n                    'log_line': log_line,\n                    'pattern_name': 
pattern_name,\n                    'extracted_columns': extracted_columns,\n                    'source_file': file_path\n                })\n\n    # Return the results\n    return results\n\n# Parse all files and print results\n\nfor log_file in log_files:\n    results = parse_log_file(log_file)\n    print(results)\n```\n\n\nLet's unpack what the `parse_log_file()` function does:\n\n\n1. Opens the file from `file_path` parameter. \n\n2. Reads all lines into a new variable `log_lines`. \n\n3. Creates a results list to store all items. \n\n4. Iterates over the log lines. \n\n5. Matches against all regex patterns configured in regex_log_pattern. \n\n6. If a match is found, extracts the matching column values.\n\n7. Creates a results item, including the values for the keys `log_line`,\n`pattern_name`, `extracted_colums`, `source_file`. \n\n8. Appends the results item to the results list.\n\n9. Returns the results list. \n\n\nThere are different variations to this – especially for the returned result\ndata structure. For this specific case, log lines come as list already.\nAdding a dictionary object instead of a raw log line allows function callers\nto extract the desired information in the next step. Once a working example\nhas been implemented, you can refactor the code later, too. \n\n\n### Advanced log format: auth.log\n\n\nParsing the syslog on a Linux distribution might not unveil the necessary\ndata to analyze. On a virtual machine that exposes port 22 (SSH) to the\nworld, the authentication log is much more interesting – plenty of bots and\nmalicious actors testing default password combinations and often brute force\nattacks.\n\n\nThe following snippet from `/var/log/auth.log` on one of my private servers\nshows the authentication log format and the random attempts from bots using\ndifferent usernames, etc. 
\n\n\n```\n\nOct 15 00:00:19 ebpf-chaos sshd[3967944]: Failed password for invalid user\nubuntu from 93.254.246.194 port 48840 ssh2\n\nOct 15 00:00:20 ebpf-chaos sshd[3967916]: Failed password for root from\n180.101.88.227 port 44397 ssh2\n\nOct 15 00:00:21 ebpf-chaos sshd[3967944]: Received disconnect from\n93.254.246.194 port 48840:11: Bye Bye [preauth]\n\nOct 15 00:00:21 ebpf-chaos sshd[3967944]: Disconnected from invalid user\nubuntu 93.254.246.194 port 48840 [preauth]\n\nOct 15 00:00:24 ebpf-chaos sshd[3967916]: Failed password for root from\n180.101.88.227 port 44397 ssh2\n\nOct 15 00:00:25 ebpf-chaos sshd[3967916]: Received disconnect from\n180.101.88.227 port 44397:11:  [preauth]\n\nOct 15 00:00:25 ebpf-chaos sshd[3967916]: Disconnected from authenticating\nuser root 180.101.88.227 port 44397 [preauth]\n\nOct 15 00:00:25 ebpf-chaos sshd[3967916]: PAM 2 more authentication\nfailures; logname= uid=0 euid=0 tty=ssh ruser= rhost=180.101.88.227 \nuser=root\n\nOct 15 00:00:25 ebpf-chaos sshd[3967998]: Invalid user teamspeak from\n185.218.20.10 port 33436\n\n```\n\n\n**Tip for intrusion prevention:** Add a firewall setup, and use\n[fail2ban](https://en.wikipedia.org/wiki/Fail2ban) to block invalid auth\nlogins. \n\n\nThe next exercise is to extend the logic to understand the free form log\nmessage parts, for example `Failed password for invalid user ubuntu from\n93.254.246.194 port 48840 ssh2`. The task is to store the data in an\noptional dictionary with key value pairs. \n\n\nCreate a new function that takes the previously parsed log line results as\ninput, and specifically parses the last list item for each line.\n\n\n1. Count the number of `Failed password` and `Invalid user` messages.\n\n2. 
Return the results with count, log file, pattern \n\n\n![Code suggestions for a log file message parser to count auth.log\nfailures](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_parse_log_message_auth_log.png){:\n.shadow}\n\n\nA working suggestion can look like the following code:\n\n\n```python\n\n# Create a function that parses a log file message from the last\nextracted_columns entry \n\n# Input: Parsed log lines results list \n\n# Loop over all log lines in the list, and extract the last list item as\nmessage \n\n# Count failure strings in the message: Failed password, Invalid user \n\n# Return the results if failure count greater 0: log_file, count, failure\nstring\n\ndef parse_log_file_message(results):\n    failure_results = []\n\n    # Iterate over the log lines\n    for result in results:\n        # Extract the message from the last list item\n        message = result['extracted_columns'][-1]\n\n        # Count the number of failure strings in the message\n        failure_count = message.count('Failed password') + message.count('Invalid user')\n\n        # If the failure count is greater than 0, add the results to the list\n        if failure_count > 0:\n            failure_results.append({\n                'log_file': result['source_file'],\n                'count': failure_count,\n                'failure_string': message\n            })\n\n    # Return the results\n    return failure_results\n\n# Parse all files and print results\n\nfor log_file in log_files:\n    results = parse_log_file(log_file)\n    failure_results = parse_log_file_message(results)\n    print(failure_results)\n```\n\n\nThe algorithm follows the previous implementations: First, create a results\narray to store matching data. Then, iterate over the already parsed\nlog_lines in the list. Each log line contains the `extracted_columns` key,\nwhich holds the free-form message string at the end. 
The next step is to\ncall the string object function `count()` to count how many times a given\ncharacter sequence is contained in a string. The returned numbers are added\nup to the `failure_count` variable. If it is greater than zero, the result\nis added to the results list, including the `log_file`, `count` and\n`failure_string` key-value pairs. After returning the parsed log message\nresults, loop through all log files, parse them, and print the results\nagain. \n\n\nExecute the script to inspect the detected matches. Note that the data\nstructure can be optimized in future learning steps.\n\n\n```\n\npython3 log_reader.py\n\n\n[{'log_file': 'log-data/var/log/auth.log', 'count': 1, 'failure_string':\n'sshd[3967944]: Failed password for invalid user ubuntu from 93.254.246.194\nport 48840 ssh2'}, {'log_file': 'log-data/var/log/auth.log', 'count': 1,\n'failure_string': 'sshd[3967916]: Failed password for root from\n180.101.88.227 port 44397 ssh2'}, {'log_file': 'log-data/var/log/auth.log',\n'count': 1, 'failure_string': 'sshd[3967916]: Failed password for root from\n180.101.88.227 port 44397 ssh2'}, {'log_file': 'log-data/var/log/auth.log',\n'count': 1, 'failure_string': 'sshd[3967998]: Invalid user teamspeak from\n185.218.20.10 port 33436'}, {'log_file': 'log-data/var/log/auth.log',\n'count': 1, 'failure_string': 'sshd[3967998]: Failed password for invalid\nuser teamspeak from 185.218.20.10 port 33436 ssh2'}, {'log_file':\n'log-data/var/log/auth.log', 'count': 1, 'failure_string': 'sshd[3968077]:\nInvalid user mcserver from 218.211.33.146 port 50950'}]\n\n\n```\n\n\n### Parsing more types: Structured logging\n\n\nApplication developers can use the structured logging format to help machine\nparsers to extract the key value pairs. 
Prometheus provides this information\nin the following structure in syslog:\n\n\n```\n\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.425Z\ncaller=compact.go:519 level=info component=tsdb m\n\nsg=\"write block\" mint=1697558404661 maxt=1697565600000\nulid=01HCZG4ZX51GTH8H7PVBYDF4N6 duration=148.675854ms\n\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.464Z\ncaller=head.go:1213 level=info component=tsdb msg\n\n=\"Head GC completed\" caller=truncateMemory duration=6.845245ms\n\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.467Z\ncaller=checkpoint.go:100 level=info component=tsd\n\nb msg=\"Creating checkpoint\" from_segment=2308 to_segment=2309\nmint=1697565600000\n\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.517Z\ncaller=head.go:1185 level=info component=tsdb msg\n\n=\"WAL checkpoint complete\" first=2308 last=2309 duration=50.052621ms\n\n```\n\n\nThis format is easier to parse for scripts, because the message part can be\nsplit by whitespaces, and the assignment character `=`. Strings that contain\nwhitespaces are guaranteed to be enclosed with quotes. The downside is that\nnot all programming language libraries provide ready-to-use structured\nlogging libraries, making it harder for developers to adopt this format. \n\n\nPractice following the previous example to parse the `auth.log` format with\nadditional information. 
Tell Code Suggestions that you are expecting\nstructured logging format with key-value pairs, and which returned data\nstructure would be great:\n\n\n```python\n\n# Create a function that parses a log file message from the last\nextracted_columns entry \n\n# Input: Parsed log lines results list \n\n# Loop over all log lines in the list, and extract the last list item as\nmessage \n\n# Parse structured logging key-value pairs into a dictionary\n\n# Return results: log_file, dictionary \n\n```\n\n\n![Code suggestions for parsing structured logging format in the log file\nmessage\npart](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_propose_structured_logging_message_parser.png){:\n.shadow}\n\n\n### Printing results and formatting\n\n\nMany of the examples used the `print()` statement to print the content on\nthe terminal. Python objects in the standard library support text\nrepresentation, and for some types it makes more sense (string, numbers),\nothers cannot provide much details (functions, etc.). \n\n\nYou can also pretty-print almost any data structure (lists, sets,\ndictionaries) in Python. The JSON library can format data structures in a\nreadable format, and use a given spaces indent to draw the JSON structure on\nthe terminal. Note that we use the `import` statement here to bring\nlibraries into the current scope, and access their methods, for example\n`json.dumps`. \n\n\n```python\n\nimport json \n\nprint(json.dumps(structured_results, indent=4))\n\n```\n\n\n![Parsing log files into structured objects, example result after following\nthe\nexercises](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_parsing_logs_and_pretty_print_results.png)\n\n\nPractice with modifying the existing source code, and replace the code\nsnippets where appropriate. 
Alternatively, create a new function that\nimplements pretty printing.\n\n\n```python\n\n# Create a pretty print function with indent 4 \n\n```\n\n\n![Code suggestions for pretty-print\nfunction](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_pretty_print.png){:\n.shadow}\n\n\nThis idea works in a similar fashion with creating your own logger\nfunctions...but we have to stop learning and take a break. Before we\nconclude the first blog post in the learning series, let's ensure that CI/CD\nand dependencies are set up properly for future exercises and async\npractice. \n\n\n## Dependency management and continuous verification  \n\n\n### Pip and pyenv: Bringing structure into Python \n\n\nDependencies can be managed in the [`requirements.txt`\nfile](https://pip.pypa.io/en/stable/reference/requirements-file-format/),\nincluding optional version dependencies. Using `requirements.txt` file also\nhas the advantage of being the single source of truth for local development\nenvironments and running continuous builds with GitLab CI/CD. They can use\nthe same installation command:\n\n\n```shell\n\npip install -r requirements.txt\n\n```\n\n\nSome Linux distributions do not install the pip package manager by default,\nfor example, Ubuntu/Debian require to install the `python3-pip` package. \n\n\nYou can manage different virtual environments using\n[venv](https://docs.python.org/3/library/venv.html). This workflow can be\nbeneficial to install Python dependencies into the virtual environment,\ninstead of globally into the OS path which might break on upgrades. \n\n\n```shell\n\npip install virtualenv\n\nvirtualenv venv\n\nsource venv/bin/activate \n\n```\n\n\n### Automation: Configure CI/CD pipeline for Python\n\n\nThe [CI/CD pipeline](https://docs.gitlab.com/ee/ci/) should continuously\nlint, test, and build the code. 
You can mimic the steps from the local\ndevelopment, and add testing more environments and versions: \n\n\n1. Lint the source code and check for formatting errors. The example uses\n[Pyflakes](https://pypi.org/project/pyflakes/), a mature linter, and\n[Ruff](https://docs.astral.sh/ruff/ ), a fast linter written in Rust. \n\n2. Cache dependencies installed using the pip package manager, following the\ndocumentation for [Python caching in GitLab\nCI/CD](https://docs.gitlab.com/ee/ci/caching/#cache-python-dependencies).\nThis saves time and resources on repeated CI/CD pipeline runs.\n\n3. Use parallel matrix builds to test different Python versions, based on\nthe available container images on Docker Hub and their tags. \n\n\n```yaml\n\nstages:\n  - lint\n  - test\n\ndefault:\n  image: python:latest\n  cache:                      # Pip's cache doesn't store the python packages\n    paths:                    # https://pip.pypa.io/en/stable/topics/caching/\n      - .cache/pip\n  before_script:\n    - python -V               # Print out python version for debugging\n    - pip install virtualenv\n    - virtualenv venv\n    - source venv/bin/activate\n\nvariables:  # Change pip's cache directory to be inside the project\ndirectory since we can only cache local items.\n  PIP_CACHE_DIR: \"$CI_PROJECT_DIR/.cache/pip\"\n\n# lint template\n\n.lint-tmpl:\n  script:\n    - echo \"Linting Python version $VERSION\"\n  parallel:\n    matrix:\n      - VERSION: ['3.9', '3.10', '3.11', '3.12']   # https://hub.docker.com/_/python\n\n# Lint, using Pyflakes: https://pypi.org/project/pyflakes/ \n\nlint-pyflakes:\n  extends: [.lint-tmpl]\n  script:\n    - pip install -r requirements.txt\n    - find . 
-not -path './venv' -type f -name '*.py' -exec sh -c 'pyflakes {}' \\;\n\n# Lint, using Ruff (Rust): https://docs.astral.sh/ruff/ \n\nlint-ruff:\n  extends: [.lint-tmpl]\n  script:\n    - pip install -r requirements.txt\n    - ruff .\n```\n\n\n![GitLab CI/CD Python lint job view, part of matrix\nbuilds](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/gitlab_cicd_python_lint_job_log_view.png)\n\n\n## What is next \n\n\nFun fact: GitLab Duo Code Suggestions also helped writing this blog post in\nVS Code, knowing about the context. In the screenshot, I just wanted to add\na tip about [regex101](https://regex101.com/), and GitLab Duo already knew. \n\n\n![Writing the GitLab blog post in VS Code with support from GitLab Duo Code\nSuggestions](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/gitlab_duo_code_suggestions_helping_write_the_learning_python_ai_blog_post.png)\n\n\nIn an upcoming blog, we will look into advanced learning examples with more\npractical (log) filtering and parallel operations, how to fetch logs from\nAPI endpoints (CI/CD job logs for example), and more data analytics and\nobservability. Until then, here are a few recommendations for practicing\nasync.\n\n\n### Async learning exercises\n\n\n- Implement the missing `log_file_limit` variable check. \n\n- Print a summary of the results in Markdown, not only JSON format. \n\n- Extend the script to accept a search filter as environment variable.\nPrint/count only filtered results. \n\n- Extend the script to accept a date range. It might require parsing the\ndatetime column in a time object to compare the range. \n\n- Inspect a GitLab CI/CD pipeline job log, and download the raw format.\nExtend the log parser to parse this specific format, and print a summary. \n\n\n### Share your feedback\n\n\nWhich programming language are you learning or considering learning? 
Start a\nnew topic on our [community](/community/) forum or Discord and share your\nexperience.\n\n\nWhen you use [GitLab Duo](/gitlab-duo/) Code Suggestions, please share your\nthoughts and feedback [in the feedback\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/405152).\n",[496,737,759,9],{"slug":2421,"featured":6,"template":693},"learning-python-with-a-little-help-from-ai-code-suggestions","content:en-us:blog:learning-python-with-a-little-help-from-ai-code-suggestions.yml","Learning Python With A Little Help From Ai Code Suggestions","en-us/blog/learning-python-with-a-little-help-from-ai-code-suggestions.yml","en-us/blog/learning-python-with-a-little-help-from-ai-code-suggestions",{"_path":2427,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2428,"content":2432,"config":2437,"_id":2439,"_type":14,"title":2440,"_source":16,"_file":2441,"_stem":2442,"_extension":19},"/en-us/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started",{"title":2429,"description":2413,"ogTitle":2429,"ogDescription":2413,"noIndex":6,"ogImage":2176,"ogUrl":2430,"ogSiteName":706,"ogType":707,"canonicalUrls":2430,"schema":2431},"Learning Rust with a little help from AI","https://about.gitlab.com/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learning Rust with a little help from AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-08-10\",\n      }",{"title":2429,"description":2413,"authors":2433,"heroImage":2176,"date":2434,"body":2435,"category":10,"tags":2436},[1262],"2023-08-10","Learning a new programming language can help broaden your software\ndevelopment expertise, open career opportunities, or create fun challenges.\nHowever, it can be difficult to decide on one specific approach to learning\na new language. 
Artificial intelligence (AI) can help. In this tutorial,\nyou'll learn how to leverage AI-powered GitLab Duo Code Suggestions for a\nguided experience in learning the Rust programming language.\n\n\n- [Preparations](#preparations)\n  - [VS Code](#vs-code)\n  - [Code Suggestions](#code-suggestions)\n- [Learning a new programming language:\nRust](#learning-a-new-programming-language-rust)\n    - [Development environment for Rust](#development-environment-for-rust)\n    - [Hello, World](#hello-world)\n- [Cargo: Bringing structure into Rust](#cargo-bringing-structure-into-rust)\n\n- [Automation: Configure CI/CD pipeline for\nRust](#automation-configure-cicd-pipeline-for-rust)\n\n- [Continue learning Rust](#continue-learning-rust)\n    - [Define variables and print them](#define-variables-and-print-them)\n    - [Explore variable types](#explore-variable-types)\n    - [Flow control: Conditions and loops](#flow-control-conditions-and-loops)\n    - [Functions](#functions)\n    - [Testing](#testing)\n- [What is next](#what-is-next)\n    - [Async learning exercises](#async-learning-exercises)\n    - [Share your feedback](#share-your-feedback)\n\n## Preparations \n\nChoose your [preferred and supported\nIDE](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-in-other-ides-and-editors),\nand follow the documentation to enable code suggestions for [GitLab.com\nSaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas)\nor [GitLab self-managed\ninstances](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\n\nProgramming languages can require an install of the language interpreter\ncommand-line tools or compilers that generate binaries from source code to\nbuild and run the application.\n\n\nTip: You can also use [GitLab Remote Development\nworkspaces](/blog/quick-start-guide-for-gitlab-workspaces/) 
to\ncreate your own cloud development environments, instead of local development\nenvironments. This blog post focuses on using VS Code and the GitLab Web\nIDE. \n\n\n### VS Code\n\nOn macOS, you can [install VS Code](https://code.visualstudio.com/download)\nas a Homebrew cask or manually download and install it. \n\n\n```shell\n\nbrew install --cask visual-studio-code \n\n```\n\n\nNavigate to the `Extensions` menu and search for `gitlab workflow`. Install\nthe [GitLab workflow extension for VS\nCode](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow). \n\n\nTip: VS Code will also detect the programming languages, and offer to\ninstall additional plugins for syntax highlighting and development\nexperience. \n\n\n### Code Suggestions\n\nIt can help to familiarize yourself with suggestions before actually\nverifying the suggestions. GitLab Code Suggestions are provided as you type,\nso you do not need to use specific keyboard shortcuts. To accept a code\nsuggestion, press the `tab` key. Also note that writing new code works more\nreliably than refactoring existing code. AI is non-deterministic, which\nmeans that the same suggestion may not be repeated after deleting the code\nsuggestion. While Code Suggestions is in Beta, we are working on improving\nthe accuracy of generated content overall. Please review the [known\nlimitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#known-limitations),\nas this could affect your learning experience. \n\n\n## Learning a new programming language: Rust \n\nNow, let's dig into learning Rust, which is one of the [supported languages\nin Code\nSuggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#supported-languages). \n\n\n[Rust by Example](https://doc.rust-lang.org/rust-by-example/) provides a\ngreat tutorial for beginners, together with the official [Rust\nbook](https://doc.rust-lang.org/book/). 
The [Hands-on Rust\nbook](https://hands-on-rust.com/) shows how to build a 2D game as a more\npractical approach. More examples are shared in [this Rust book\nlist](https://github.com/sger/RustBooks). \n\n\nBefore diving into the source code, make sure to set up your development\nenvironment.\n\n\n### Development environment for Rust\n\n1) Create a new project `learn-rust-ai` in GitLab, and clone the project\ninto your development environment. All code snippets are available in [this\n\"Learn Rust with AI\"\nproject](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai).\n\n\n```shell\n\ngit clone https://gitlab.com/NAMESPACE/learn-rust-ai.git\n\n\ncd learn-rust-ai\n\n\ngit status\n\n```\n\n\n2) Install Rust and the build toolchain. Fortunately, this is\nstraightforward [following the Rust install\ndocumentation](https://www.rust-lang.org/tools/install).\n\n\nTip for using the generic installer: Download the script and run it after\nreview. \n\n\n```\n\n# Download and print the script before running it\n\ncurl -Lvs https://sh.rustup.rs -o rustup-init.sh\n\n\n# Run the Rust installer script\n\nsh rustup-init.sh \n\n```\n\n\nExample on macOS using Homebrew:\n\n\n```\n\nbrew install rust\n\n```\n\n\n1) Optional: Install the [rust-analyzer VS Code\nextension](https://marketplace.visualstudio.com/items?itemName=rust-lang.rust-analyzer).\n\n\n2) Each exercise will invite you to compile the code with the [`rustc`\ncommand](https://doc.rust-lang.org/rustc/what-is-rustc.html), and later\nusing [`cargo` as build tool and package\nmanager](https://doc.rust-lang.org/cargo/index.html).\n\n\nYou are all set to learn Rust! \n\n\n### Hello, World\n\nWe will start with [Rust by\nExample](https://doc.rust-lang.org/rust-by-example/), and follow the [Hello,\nWorld exercise](https://doc.rust-lang.org/rust-by-example/hello.html).\n\n\nCreate a new file `hello.rs` in the root directory of the project and start\nwith a comment saying `// Hello world`. 
Next, start writing the `main`\nfunction, and verify the code suggestion.\n\n\n![VS Code hello.rs Rust code suggestion, asking to\naccept](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_hello_world_suggested.png){:\n.shadow}\n\n\nAccept the suggestion by pressing the `tab` key and save the file (keyboard\nshortcut: cmd s). \n\n\n```\n\n// Hello world\n\n\nfn main() {\n    println!(\"Hello, world!\");\n}\n\n```\n\n\n![VS Code hello.rs Rust code suggestion,\naccepted](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_hello_world_accepted.png){:\n.shadow}\n\n\nCommit the change to the Git repository. In VS Code, use the keyboard\nshortcut `ctrl shift G`, add a commit message, and hit `cmd enter` to\nsubmit. \n\n\nUse the command palette (`cmd shift p`) and search for `create terminal` to\nopen a new terminal. \n\n\nBuild and run the code.\n\n\n```shell\n\nrustc hello.rs\n\n\n./hello\n\n```\n\n\n![hello.rs Rust code suggestion, accepted, compiled,\nrun](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_hello_world_cli_build.png){:\n.shadow}\n\n\nTip: Adding [code comments in Rust\n(`//`)](https://doc.rust-lang.org/reference/comments.html) before you start\nwriting a function or algorithm will help Code Suggestions with more context\nto provide better suggestions. In the example above, we did that with `//\nHello world`, and will continue doing so in the next exercises. \n\n\n## Cargo: Bringing structure into Rust\n\n[Cargo](https://doc.rust-lang.org/rust-by-example/cargo.html) is the\nofficial Rust package management tool. It is more than that - you can run\nbuild and test commands because Cargo understands them as well. 
\n\n\nYou can initialize a new Cargo configuration in the current directory tree\nwith the following command:\n\n\n```shell\n\ncargo init\n\n```\n\n\nThe directory tree invites you to add the source code into the `src/`\ndirectory, while `Cargo.toml` manages the dependencies and used compiler\nversions. The `.gitignore` file is also added including best practices. \n\n\n```shell\n\ntree\n\n.\n\n├── Cargo.toml\n\n├── README.md\n\n├── hello\n\n├── hello.rs\n\n└── src\n    └── main.rs\n```\n\n\nTry building the code and running it using `cargo`.\n\n\n```shell\n\ncargo build\n\n\ncargo run\n\n```\n\n\nCommit all changes and push them to your GitLab project.\n\n\n```shell\n\ngit commit -avm \"Initialize Cargo\"\n\n\ngit push\n\n```\n\n\nAfter exploring Cargo, let's make sure that our code is continuously tested\nwhile learning Rust. The next section explains how to set up [GitLab\nCI/CD](https://about.gitlab.com/topics/ci-cd/) for Rust. \n\n\n## Automation: Configure CI/CD pipeline for Rust\n\nThe [CI/CD pipeline](https://docs.gitlab.com/ee/ci/) should run two jobs in\ntwo stages: Build and test the code. The default container\n[image](https://docs.gitlab.com/ee/ci/yaml/#image), `rust:latest`, works in\nthe first iteration. In order to save resources, the CI/CD configuration\nalso supports [caching](https://docs.gitlab.com/ee/ci/caching/) for\ndownloaded dependencies and build objects. The `CARGO_HOME` variable is set\nto the CI/CD job home directory to ensure everything gets appropriately\ncached.\n\n\n```yaml\n\nstages:\n  - build\n  - test \n\ndefault:\n  image: rust:latest\n  cache:\n    key: ${CI_COMMIT_REF_SLUG}\n    paths:                      \n      - .cargo/bin\n      - .cargo/registry/index\n      - .cargo/registry/cache\n      - target/debug/deps\n      - target/debug/build\n    policy: pull-push\n\n# Cargo data needs to be in the project directory to be cached. 
\n\nvariables:\n  CARGO_HOME: ${CI_PROJECT_DIR}/.cargo      \n```\n\n\nThe CI/CD jobs inherit the\n[`default`](https://docs.gitlab.com/ee/ci/yaml/#default) values, and specify\nthe cargo commands in the [`script`\nsection](https://docs.gitlab.com/ee/ci/yaml/#script).\n\n\n```yaml\n\nbuild-latest:\n  stage: build\n  script:\n    - cargo build --verbose\n\ntest-latest:\n  stage: test\n  script:\n    - cargo test --verbose\n```\n\n\nYou can see an example in [this\nMR](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/merge_requests/1/diffs).\n\n\n## Continue learning Rust \n\nMake sure to add new source code into the `src/` directory. \n\n\n### Define variables and print them\n\nPractice adding a few more\n[print](https://doc.rust-lang.org/rust-by-example/hello/print.html)\nstatements into `src/main.rs`, and then build and run the code again.\n\n\n1) Define a variable called `name` and assign your name as string value.\n\n\n2) Print the name, including a string prefix saying `Hello, `. \n\n\n![VS Code main.rs Rust code suggestion, first step in\nprint](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_print_variable_first.png){:\n.shadow}\n\n\n![VS Code main.rs Rust code suggestion, second step in\nprint](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_print_variable_second.png){:\n.shadow}\n\n\n1) Open a new terminal in VS Code using the command palette (keyboard\nshortcut `cmd + shift + p`) and search for `terminal`.\n\n\n2) Build and run the code with the `cargo build` and `cargo run` commands. 
\n\n\n![VS Code terminal with cargo build and run\noutput](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_print_variable_cargo_build_run_terminal.png){:\n.shadow}\n\n\nAn example solution can be found\n[here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/variable_print.rs). \n\n\n### Explore variable types \n\nDefine different variable value types\n([primitives](https://doc.rust-lang.org/rust-by-example/primitives.html))\nand embed them into the `print` statements. Maybe they feel familiar with\nother programming languages?\n\n\nTip: Use code comments to see which code suggestions can be useful to learn.\nStart with typing `// Integer addition` and see what code suggestions you\ncan add.\n\n\n![VS Code main.rs Rust code suggestion, primitive types with literals and\nexpressions](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_primitive_types_literals_operators.png)\n\n\nExperiment with GitLab Duo Code Suggestions. The shown examples are\nnon-deterministic, but you may be able to add additions, subtractions,\nmultiplications, etc., and the corresponding `println` statements just by\naccepting code suggestions and continuing the flow with `enter` or\ncompleting the code statements. This workflow can create a chain of code\nsuggestions that can help you learn the Rust language. 
\n\n\n![Literals and expressions, first\nsuggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_literals_expressions_01.png){:\n.shadow}\n\n![Literals and expressions, second\nsuggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_literals_expressions_02.png){:\n.shadow}\n\n![Literals and expressions, third\nsuggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_literals_expressions_03.png){:\n.shadow}\n\n\nAn example solution can be found\n[here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/literals_expressions.rs). \n\n\nThe code suggestions are not perfect. Sometimes there are errors that\nrequire you to fix the problems. When writing this blog post, I had to fix\ntwo missing semicolons at the end of the code lines. The great thing about\nthe Rust compiler is that the error messages tell you exactly where the\nproblem happens with suggestions to fix them. Code Suggestions and the\nRust-provided build chain make writing Rust code more efficient. \n\n\n```rust\n\nprintln!(\"Hello, {}!\", name)\n\n\n// Integer subtraction\n\nlet y = 9 - 4\n\n```\n\n\n![Terminal build, errors, Rust compiler\nhelp](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_terminal_errors_rust_help.png){:\n.shadow}\n\n\nYou can try to provoke the same error by removing a semicolon at the end of\na statement and then running `cargo build` in the terminal again. The Rust\ncompiler will also warn you about unused variables to help with better code\nquality. The screenshot shows warnings for variable definitions, and also a\nCLI command to fix them. 
\n\n\n![Terminal build, warnings, Rust compiler\nhelp](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_terminal_warnings_rust_help.png){:\n.shadow}\n\n\n### Flow control: Conditions and loops \n\nNext, let's focus on [flow\ncontrol](https://doc.rust-lang.org/rust-by-example/flow_control.html) with\nconditions, loops, etc., and how to implement them.\n\n\n1) Start typing `// Flow control` and see which suggestions are provided.\n\n\n2) Experiment with the code, and continue defining a boolean variable `v`\nwhich is set to true. \n\n\n```rust\n  // Flow control\n  let v = true;\n\n```\n\n\n![Conditions, boolean\nvariable](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_conditions_01.png){:\n.shadow}\n\n![Conditions, boolean variable, if\ncondition](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_conditions_02.png){:\n.shadow}\n\n\n1) Start typing `// Loops` and experiment with the code suggestions. \n\n\nLet's assume the loop looks like the following snippet. It does not have a\nloop counter which gets printed on every loop execution.\n\n\n```rust\n\n// Loops\n\nlet mut count = 0;\n\n\nloop {\n    count += 1;\n\n    if count == 10 {\n        break;\n    }\n}\n\n```\n\n\n2) Start typing `println!` and see which code suggestions are provided, for\nexample `println!(\"Count: {}\", count);`. \n\n\n![Loops, loop counter print\nsuggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_loops_print_counter.png)\n\n\n3) Apply the suggestions, and execute `cargo build && cargo run` on the\nterminal again. 
\n\n\nLet's learn more: Rust supports different loop types, for example [while\nloops](https://doc.rust-lang.org/rust-by-example/flow_control/while.html),\n[for\nloops](https://doc.rust-lang.org/rust-by-example/flow_control/for.html),\netc. \n\n\n1) Type `// While loop` and verify the code suggestions. Repeat the same for\n`// For loop`.\n\n\n```rust\n\n// While loops\n\nlet mut count = 0;\n\n\nwhile count \u003C 10 {\n    count += 1;\n    println!(\"Count: {}\", count);\n}\n\n\n// For loops\n\nlet a = [10, 2, 3, 4, 5];\n\n\nfor element in a {\n    println!(\"Element: {}\", element);\n}\n\n```\n\n\nThere is more to learn with loops and conditions: Iterate over arrays,\nlists, maps, slices. Practice with writing comments for `// Maps and sets`\nand `// Vectors and strings`. \n\n\n![Vectors,\nstrings](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_vectors_strings.png){:\n.shadow}\n\n\n```rust\n  // Maps and sets\n  let mut scores = HashMap::new();\n\n  scores.insert(String::from(\"Blue\"), 10);\n  scores.insert(String::from(\"Yellow\"), 50);\n\n  for (key, value ) in &scores {\n      println!(\"{}: {}\", key, value);\n  }\n\n  // Vectors and strings\n  let mut v = Vec::new();\n\n  v.push(1);\n  v.push(2);\n\n  for element in &v {\n      println!(\"Element: {}\", element);\n  }  \n```\n\n\nThis snippet will fail because the `HashMap` type needs to be imported from\n`std::collections::HashMap`. Add the following line on top before the main\nfunction definition: \n\n\n```rust\n\nuse std::collections::HashMap;\n\n``` \n\n\n2) Build and run the code with `cargo build && cargo run`. 
\n\n\nAn example solution is provided\n[here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/flow_control.rs).\n\n\n**Async exercise**: \n\n\n1) Modify the input values for the different data types, and build and run\nthe code again.\n\n\n2) Add a condition into the loops that print the items only when a specific\ncondition is met (for example, the number is odd or even). \n\n\n### Functions \n\n\n[Functions](https://doc.rust-lang.org/rust-by-example/fn.html) help increase\ncode readability and testability with unit tests. Practice creating\nfunctions with the following steps: \n\n\n1) Two functions `isEven` and `isOdd` to evaluate whether a number is even\nor odd.\n\n\n```rust\n\nfn isEven(x: i32) -> bool {\n    x % 2 == 0\n}\n\n\nfn isOdd(x: i32) -> bool {\n    x % 2 != 0\n}\n\n```\n\n\n2) `isPrime` function to check whether a given integer value is a prime\nnumber.\n\n\n```rust\n\nfn isPrime(x: i32) -> bool {\n    let mut i = 2;\n\n    while i * i \u003C= x {\n        if x % i == 0 {\n            return false;\n        } else {\n            i += 1;\n        }\n    } \n\n    return true\n}\n\n```\n\n\n3) Create an array of integer values, loop over it, and call the functions.\nLet GitLab Code Suggestions guide you with the implementation by starting to\ntype the if conditions followed by the function name. \n\n\n```rust\n  // Functions\n  let mut integers = vec![1, 2, 3, 4, 5];\n\n  for i in integers.iter() {\n\n      if (isEven(i)) {\n          println!(\"{} is even\", i);\n      }\n\n      if (isOdd(i)) { \n          println!(\"{} is odd\", i);\n      }\n\n      if (isPrime(i)) { \n          println!(\"{} is prime\", i);\n      }\n\n      println!(\"{}\", i);\n  }\n```\n\n\nNote that passing a reference value to a function may result in an error\nfrom the Rust compiler. Follow the suggestions and build the code again. 
\n\n\n```shell\n\n$ cargo build && cargo run \n\n\nerror[E0308]: mismatched types\n   --> src/main.rs:112:21\n    |\n112 |         if (isPrime(i)) { \n    |             ------- ^ expected `i32`, found `&{integer}`\n    |             |\n    |             arguments to this function are incorrect\n    |    \nnote: function defined here\n   --> src/main.rs:136:4\n    |\n136 | fn isPrime(x: i32) -> bool {\n    |    ^^^^^^^ ------\nhelp: consider dereferencing the borrow\n    |\n112 |         if (isPrime(*i)) { \n    |                     +\n```\n\n\nAn example solution is provided\n[here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/functions.rs).\n\n\n**Async exercise**: Create a function `containsString` and test it with an\narray of string values, and a string to search for, in a loop. The\nscreenshot shows a potential implementation. \n\n\n![containsString function, and vector with string elements to test,\nsuggesting its usage in the main\nfunction](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_function_implemented_then_suggested_in_main.png){:\n.shadow}\n\n\n### Testing \n\nWhile learning programming, adopt\n[testing](https://doc.rust-lang.org/rust-by-example/testing.html) into your\nprocess. This can be unit tests for functions, documentation testing, and\nintegration testing. Practice with testing the previously created functions\n`isEven`, `isOdd`, and `isPrime`. 
Starty by typing `mod tests {` followed by\na new line with `use super::*` to implement the example from the [Rust\ndocumentation for unit\ntests](https://doc.rust-lang.org/rust-by-example/testing/unit_testing.html).\n\n\n```rust\n\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_is_even() {\n        assert!(isEven(2)); \n        assert!(!isEven(3));\n    }\n\n    #[test] \n    fn test_is_odd() {\n        assert!(!isOdd(2));\n        assert!(isOdd(3));\n    }\n\n    #[test]\n    fn test_is_prime() { \n        assert!(isPrime(2));\n        assert!(!isPrime(3));\n    }\n}\n\n```\n\n\nRun `cargo test` to run the unit tests. Modify the test values to experiment\nwith the results. \n\n\n```shell\n\ncargo test\n\n```\n\n\n![Function unit tests, cargo test output in the VS Code\nterminal](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_function_unit_tests_terminal_run.png)\n\n\nCreate unit tests that fail, and commit and push the changes to GitLab. The\nCI/CD pipelines will fail in this simulated breakage. The example above\nneeds a fix for the `test_is_prime` test. Commit and push the change to\nverify that the pipeline passes again. \n\n\n```diff\n\n-        assert!(!isPrime(3));\n\n+        assert!(!isPrime(4));\n\n```\n\n\n## What is next \n\nIn an upcoming blog, we will look into advanced learning examples with\nasynchronous operations, services and external API communication in future\nblog posts. 
Until then, here are a few recommendations for practicing async.\n\n\n### Async learning exercises\n\n- [`std misc`](https://doc.rust-lang.org/rust-by-example/std_misc.html)\nprovides asynchronous operations with threads, channels and file I/O\n\n- Book: [Hands-on Rust: Effective Learning through 2D Game Development and\nPlay](https://pragprog.com/titles/hwrust/hands-on-rust/)\n\n- Tutorial: [Are we game yet?](https://arewegameyet.rs/resources/tutorials/)\n\n- Use case: [Web server with\nrocket.rs](https://rocket.rs/v0.5-rc/guide/quickstart/#running-examples)\n\n\nHere are a few more exercises and ideas for additional learning:\n\n1) The Rust compiler might have created warnings that need to be addressed.\nFollow the instructions from the `cargo build` commands and check the Git\ndiff. \n\n\n```\n\ncargo fix --bin \"learn-rust-ai\"\n\n\ngit diff \n\n```\n\n\n2) [Error handling](https://doc.rust-lang.org/rust-by-example/error.html) is\nrequired when failure is detected, and the caller should know. Some errors\ncan be recovered from within the application, others require program\ntermination. \n\n\n3) The [`std` library](https://doc.rust-lang.org/rust-by-example/std.html)\nextends primitive types and makes programming more efficient. \n\n\n### Share your feedback\n\nWhich programming language are you learning or considering learning? Start a\nnew topic on our [community](/community/) forum or Discord and share your\nexperience.  
\n\n\nIf you are using Code Suggestions Beta with [GitLab Duo](/gitlab-duo/)\nalready, please share your thoughts and feedback [in this\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/405152).\n",[716,2402,737,759,9],{"slug":2438,"featured":6,"template":693},"learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started","content:en-us:blog:learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started.yml","Learning Rust With A Little Help From Ai Code Suggestions Getting Started","en-us/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started.yml","en-us/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started",{"_path":2444,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2445,"content":2451,"config":2457,"_id":2459,"_type":14,"title":2460,"_source":16,"_file":2461,"_stem":2462,"_extension":19},"/en-us/blog/machine-learning-and-devsecops",{"title":2446,"description":2447,"ogTitle":2446,"ogDescription":2447,"noIndex":6,"ogImage":2448,"ogUrl":2449,"ogSiteName":706,"ogType":707,"canonicalUrls":2449,"schema":2450},"Machine learning and DevSecOps: Inside the OctoML/GitLab integration","MLOps and DevSecOps teams can unify their workflows and gain automation and cost efficiencies.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666915/Blog/Hero%20Images/autodevops.jpg","https://about.gitlab.com/blog/machine-learning-and-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Machine learning and DevSecOps: Inside the OctoML/GitLab integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sameer Farooqui, OctoML\"}],\n        \"datePublished\": \"2023-02-23\",\n      }",{"title":2446,"description":2447,"authors":2452,"heroImage":2448,"date":2454,"body":2455,"category":1201,"tags":2456},[2453],"Sameer Farooqui, OctoML","2023-02-23","\n\nMachine learning can be a powerful tool in 
software development, but not if it has to live apart from existing engineering workflows. DevSecOps teams, including MLOps, can now integrate [OctoML CLI](https://gitlab.com/octoml/octoml-public/octoml-cli-tutorials) into [GitLab’s CI/CD Pipelines](https://docs.gitlab.com/ee/ci/pipelines/) to unify workflows and leverage existing deployment and monitoring infrastructure. This integration makes it easier to catch bugs and model performance degradations early in the ML development cycle. \n\nThe OctoML Platform is a machine learning model optimization and deployment service powered by octoml.ai. [Machine learning has grown in popularity](/blog/top-10-ways-machine-learning-may-help-devops/) in DevSecOps, along with AI, because of its ability to learn and model how to perform complex tasks as a human would and then automate those tasks.\n\n## How does CI/CD apply to machine learning?\n\nOnce a machine learning model has been successfully deployed, it can get stale over time and its accuracy could degrade, a situation called “data drift”. Data drift causes newer inferencing data to drift away from the data used to train the model. In the retail industry, this can happen because of seasonality, as an example.\n\nProduction models must be regularly refreshed by retraining their weights with the latest data. Applying CI/CD concepts borrowed from software engineering, the OctoML CI integration makes the deployment process for trained/re-trained models automated and repeatable.\n\n## How OctoML CLI and GitLab work together\n\nNew commits to your inference code repository can run [OctoML CLI](https://github.com/octoml/octoml-cli-tutorials#readme) in your GitLab pipeline to automatically optimize machine learning models for lowest cost per inference and lowest latency, and then deploy the optimized model to your cloud registry. 
For customers looking for more granular packaging formats that integrate with existing containerization systems, OctoML offers [Python wheel packaging](https://app.octoml.ai/docs/deploy.html#python-wheel-deployment) and will soon offer YAML configuration files. To reduce model latency and serving costs, OctoML searches through multiple acceleration engines such as Apache TVM, ONNX Runtime, and TensorRT and then suggests the ideal CPU or GPU hardware type on AWS, Azure, or GCP.\n\n## Choice in cloud deployment targets\n\nUsing OctoML CLI, developers can send any trained model to OctoML’s SaaS platform for cost efficiency and cloud hardware benchmarking. By adapting and optimizing the trained model to leverage hardware intrinsics in CPU and GPUs, OctoML makes inferences run faster in production, thus saving users on cost per inference and improving the user experience of ML applications.\n\n![Cloud workflow](https://about.gitlab.com/images/blogimages/octomlintegration/image1.png){: .shadow}\n\nThe cloud workflow is designed for enterprise and production deployments. 
Here’s how it works:\n\n* The initial push from a developer to the GitLab repository launches a local, shared, or remote runner.\n* The runner will send the updated, trained model first to OctoML’s platform for acceleration and hardware adaptation.\n* Then, the pipeline pushes the accelerated model container to the GitLab Container Registry.\n* Finally, it deploys the container to a managed Kubernetes service in any of the major cloud providers.\n\nModels deployed via the accelerated cloud workflow not only provide end users the lowest latency user experience but also save the organization compute costs at inference time, which can amount to \\[90% of a production machine learning application’s compute costs](https://aws.amazon.com/blogs/machine-learning/reduce-ml-inference-costs-on-amazon-sagemaker-with-hardware-and-software-acceleration/).\n\n## Four required stages for every pipeline\n\nEach pipeline has four stages: setup, package, deploy, and test. Here’s the logical flow:\n\n![Logical flow](https://about.gitlab.com/images/blogimages/octomlintegration/image2.png){: .shadow}\n\n1. common:setup - produces OctoML CLI binary artifact and passes it on to local:package\n2. cloud:package - packages the incoming model into a Docker tarball using the OctoML CLI binary and passes the tarball to the next stage\n3. cloud:deploy - builds a Docker image from the Tarball and deploys the docker container to a remote registry (in our example, we deploy it to AWS via GitLab Container Registry using Flux, but there can be other mechanisms)\n4. cloud:test - run the user-provided test script\n\nWhen a cloud pipeline is executed, the GitLab Pipeline UI will display a corresponding workflow:\n\n![GitLab Pipeline UI](https://about.gitlab.com/images/blogimages/octomlintegration/image3.png){: .shadow}\n\nSimilar to any other GitLab CI/CD job, our [example repository](https://gitlab.com/octoml/octoml-public/octoml-cli-tutorials) has YAML files that define how each stage will execute. 
You can easily clone the repository or code and adapt it to your custom model and inference code:\n\n![example repository](https://about.gitlab.com/images/blogimages/octomlintegration/image4.png){: .shadow}\n\nIn addition to the stage YAML files, OctoML CLI also has its own `octoml.yaml` configuration, which defines the path to your model, hardware type the model should be accelerated for, and the model’s input shapes:\n\n![octoml.yaml config](https://about.gitlab.com/images/blogimages/octomlintegration/image5.png){: .shadow}\n\n## Get started with OctoML CLI and GitLab CI/CD\n\nOctoML CLI and GitLab CI/CD unify your software engineering and machine learning pipelines by allowing ML models to be deployed using the same infrastructure and processes you’re currently using for software applications. Further, our integration makes it seamless to start with local model deployments to test end-to-end inference and move to accelerated cloud deployments with minimal changes to your workflow.\n\n**We’ve [published tutorials](https://gitlab.com/octoml/octoml-public/octoml-cli-tutorials) with an NLP (Bertsquad) and Vision (YOLOv5) model for end-to-end examples. 
So, to get started, download the [OctoML CLI](https://try.octoml.ai/cli/) and [request an acceleration consultation](https://try.octoml.ai/cli/#lp-pom-block-105) to receive a token to OctoML’s SaaS platform.**\n",[233,9,716],{"slug":2458,"featured":6,"template":693},"machine-learning-and-devsecops","content:en-us:blog:machine-learning-and-devsecops.yml","Machine Learning And Devsecops","en-us/blog/machine-learning-and-devsecops.yml","en-us/blog/machine-learning-and-devsecops",{"_path":2464,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2465,"content":2471,"config":2478,"_id":2480,"_type":14,"title":2481,"_source":16,"_file":2482,"_stem":2483,"_extension":19},"/en-us/blog/machine-learning-on-the-gitlab-devops-platform",{"title":2466,"description":2467,"ogTitle":2466,"ogDescription":2467,"noIndex":6,"ogImage":2468,"ogUrl":2469,"ogSiteName":706,"ogType":707,"canonicalUrls":2469,"schema":2470},"How Comet can streamline machine learning on The GitLab DevOps Platform","Here's a step-by-step look at how to bring ML into software development using Comet on GitLab's DevOps Platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669991/Blog/Hero%20Images/ways-to-encourage-collaboration.jpg","https://about.gitlab.com/blog/machine-learning-on-the-gitlab-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Comet can streamline machine learning on The GitLab DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2021-11-08\",\n      }",{"title":2466,"description":2467,"authors":2472,"heroImage":2468,"date":2474,"body":2475,"category":1078,"tags":2476},[2473],"William Arias","2021-11-08","\n\nBuilding machine learning-powered applications comes with numerous challenges. 
When we talk about these challenges, there is a tendency to overly focus on problems related to the quality of a model’s predictions—things like data drift, changes in model architectures, or inference latency. \n\nWhile these are all problems worthy of deep consideration, an often overlooked challenge in [ML development](/topics/devops/the-role-of-ai-in-devops/) is the process of integrating a model into an existing software application.  \n\nIf you’re tasked with adding an ML feature to a product, you will almost certainly run into an existing codebase that must play nicely with your model. This is, to put it mildly, not an easy task. \n\nML is a highly iterative discipline. Teams often make many changes to their codebase and pipelines in the process of developing a model. Coupling an ML codebase to an application’s dependencies, unit tests, and CI/CD pipelines will significantly reduce the velocity with which ML teams can deliver on a solution, since each change would require running these downstream dependencies before a merge can be approved.  \n\nIn this post, we’re going to demonstrate how you can use [Comet](https://www.comet.ml/site/) with [GitLab’s DevOps platform](/solutions/devops-platform/) to streamline the workflow for your ML and software engineering teams, allowing them to collaborate without getting in each other's way.      \n\n## The challenge for ML teams working with application teams\n\nLet’s say your team is working on improving a feature engineering pipeline. You will likely have to test many combinations of features with some baseline model for the task to see which combinations make an impact on model performance.     \n \nIt is hard to know beforehand which features might be significant, so having to run multiple experiments is inevitable. If your ML code is a part of your application codebase, this would mean having to run your application’s CI/CD pipeline for every feature combination you might be trying. 
\n\nThis will certainly frustrate your Engineering and DevOps teams, since you would be unnecessarily tying up system resources, given that software engineering teams do not need to run their pipelines with the same frequency as ML teams do.  \n\nThe other issue is that despite having to run numerous experiments, only a single set of outputs from these experiments will make it to your production application. Therefore, the rest of the assets produced through these experiments are not relevant to your application code.     \n\nKeeping these two codebases separated will make life a lot easier for everyone – but it also introduces the problem of syncing the latest model between two codebases.     \n\n## Use The GitLab DevOps Platform and Comet for your model development process\n\nWith The GitLab DevOps platform and Comet, we can keep the workflows between ML and engineering teams separated, while enabling cross-team collaboration by preserving the visibility and auditability of the entire model development process across teams.     \n\nWe will use two separate projects to demonstrate this process. One project will contain our application code for a handwritten digit recognizer, while the other will contain all the code relevant to training and evaluating our model.  \n\nWe will adopt a process where discussions, code reviews, and model performance metrics get automatically published and tracked within The GitLab DevOps Platform, increasing the velocity and opportunity for collaboration between data scientists and software engineers for machine learning workflows.\n\n## Project setup\n\nOur project consists of two projects: [comet-model-trainer](https://gitlab.com/tech-marketing/devops-platform/comet-model-trainer) and [ml-ui](https://gitlab.com/tech-marketing/devops-platform/canara-review-apps-testing). 
\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/cometmodeltrainer.png){: .shadow}\n\nThe **comet-model-trainer** repository contains scripts to train and evaluate a model on the MNIST dataset. We have set up The GitLab DevOps Platform in a way that runs the training and evaluation Pipeline whenever a new merge request is opened with the necessary changes.\n\nThe **ml-ui** repository contains the necessary code to build the frontend of our ML application.\n\nSince the code is integrated with Comet, your ML team can easily track the source code, hyperparameters, metrics, and other details related to the development of the model.  \n\nOnce the training and evaluation steps are completed, we can use Comet to fetch summary metrics from the project as well as metrics from the Candidate model and display them within the merge request; This will allow the ML team to easily review the changes to the model. \n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/buildmodelgraph.png){: .shadow}\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/summarymetrics.png){: .shadow}\n\nIn our case, the average accuracy of the models in the project is 97%. Our Candidate model achieved an accuracy of 99%, so it looks like it is a good fit to promote to production. The metrics displayed here are completely configurable and can be changed as necessary.        \n\nWhen the merge request is approved, the deployment pipeline is triggered and the model is pushed to Comet’s Model Registry. The Model Registry versions each model and links it back to the Comet Experiment that produced it.  \n![Alt text for your image](https://about.gitlab.com/images/blogimages/OpenComet_SparkVideo.gif){: .shadow}    \n\nOnce the model is pushed to the Model Registry, it is available to the application code. 
When the application team wishes to deploy this new version of the model to their app, they simply have to trigger their specific deployment pipeline.     \n\n## Running the pipeline\n\n### Pipeline outline\n\nWe will run the process outlined below every time a team member creates a merge request to change code in the `build-neural-network`script:\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/modelapprove.png){: .shadow}\n\nNow, let’s take a look at the yaml config used to define our CI/CD pipelines depicted in the previous diagram:\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/workflowsbranch.png){: .shadow}\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/script.png){: .shadow}\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/registermodel.png){: .shadow}\n\nLet's break down the CI/CD pipeline by describing the gitlab-ci.yml file so you can use it and customize it to your needs.\n\nWe start by instructing our GitLab runners to utilize Python:3.8 to run the jobs specified in the pipeline: \n\n`Image: python:3.8`\n\nThen, we define the job where we want to build and train the neural network:\n\n`Build-neural-network`\n\n### Build-neural-network \n\nIn this step, we start by creating a folder where we will store the artifacts generated by this job, install dependencies using the requirements.txt file, and finally  execute the corresponding Python script that will be in charge of training the neural network. The training runs in the GitLab runner using the Python image defined above, along with its dependencies.\n\nOnce the `build-neural-network` job has finalized successfully, we move to the next job: `write-report-mr`\n\nHere, we use another image created by DVC that will allow us to publish a report right in the merge request opened by the contributor who changed code in the neural network script. 
In this way, we’ve brought software development workflows to the development of ML applications. With the report provided by this job, code and model review can be executed within the merge request view, enabling teams to collaborate not only around the code but also the model performance.\n\nFrom the merge request page, we get access to loss curves and other relevant performance metrics from the model we are training, along with a link to the Comet Experiment UI, where richer details are provided to evaluate the model performance. These details include interactive charts for model metrics, the model hyperparameters, and Confusion Matrices of the test set performance, to name a few. \n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/manualDeploy_SparkVideo.gif){: .shadow}\n\nWhen the team is done with the code and model review,  the merge request gets approved, and the script that generated the model is merged into the main codebase, along with its respective commit and the CI pipeline associated to it. This takes us to the next job: \n\n### Register-model\n\nThis job uses an integration between GitLab and Comet to upload the reviewed and accepted version of the model to the Comet Model Registry. If you recall, the Model Registry is where models intended for production can be logged and versioned. 
In order to run the commands that will register the model, we need to set up these variables: \n\n- COMET_WORKSPACE\n- COMET_PROJECT_NAME \n \nIn order to do that, follow the steps described [here](https://docs.gitlab.com/ee/ci/variables/#add-a-cicd-variable-to-an-instance).\n\nIt is worth noting that the `register-model` job only runs when the merge request gets reviewed and approved, and this behavior is obtained by setting `only: main` at the end of the job.\n\nFinally, we decide to let a team member have final control of the deployment so therefore we define a manual job:\n`Deploy-ml-ui`\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/deployuiml.png){: .shadow}\n\nWhen triggered, this job will import the model from Comet’s Model Registry and automatically create the necessary containers to build the user interface and deploy to a Kubernetes cluster. \n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/downstream.png){: .shadow}\n\nThis job triggers a downstream pipeline, which means that the UI for this MNIST application resides in a different project. This keeps the codebase for the UI and model training separated but integrated and connected at the moment of deploying the model to a production environment.\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/multipipeline_SparkVideo.gif){: .shadow}\n\n## Key takeaways\n\nIn this post, we addressed some of the challenges faced by ML and software teams when it comes to collaborating on delivering ML-powered applications. Some of these challenges include:\n\n* The discrepancy in the frequency with which each of these teams need to iterate on their codebases and CI/CD pipelines.\n\n* The fact that only a single set of experiment assets from an ML experimentation pipeline is relevant to the application.\n\n* The challenge of syncing a model or other experiment assets across independent codebases.   
\n\nUsing The GitLab DevOps Platform and Comet, we can start bridging the gap between ML and software engineering teams over the course of a project. \n\nBy having model performance metrics adopted into software development workflows like the one we saw in the issue and merge request, we can keep track of the code changes, discussions, experiments, and models created in the process. All the operations executed by the team are recorded, can be audited, are end-to end-traceable, and (most importantly) reproducible. \n\nWatch a demo of this process:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/W_DsNl5aAVk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n_About Comet:_\nComet is an MLOps Platform that is designed to help data scientists and teams build better models faster! Comet provides tooling to Track, Explain, Manage, and Monitor your models in a single place! 
\n\nLearn more about Comet [here](https://www.comet.ml/site/) and get started for free!\n\n\n\n",[917,2477,233,9],"demo",{"slug":2479,"featured":6,"template":693},"machine-learning-on-the-gitlab-devops-platform","content:en-us:blog:machine-learning-on-the-gitlab-devops-platform.yml","Machine Learning On The Gitlab Devops Platform","en-us/blog/machine-learning-on-the-gitlab-devops-platform.yml","en-us/blog/machine-learning-on-the-gitlab-devops-platform",{"_path":2485,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2486,"content":2491,"config":2496,"_id":2498,"_type":14,"title":2499,"_source":16,"_file":2500,"_stem":2501,"_extension":19},"/en-us/blog/mastering-gitlab-admin-tasks-with-gitlab-duo-chat",{"title":2487,"description":2488,"ogTitle":2487,"ogDescription":2488,"noIndex":6,"ogImage":750,"ogUrl":2489,"ogSiteName":706,"ogType":707,"canonicalUrls":2489,"schema":2490},"Mastering GitLab admin tasks with GitLab Duo Chat","Learn how to use Chat to streamline administrative tasks on self-managed instances, improving efficiency and problem-solving capabilities.","https://about.gitlab.com/blog/mastering-gitlab-admin-tasks-with-gitlab-duo-chat","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Mastering GitLab admin tasks with GitLab Duo Chat\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-08-09\",\n      }",{"title":2487,"description":2488,"authors":2492,"heroImage":750,"date":2493,"body":2494,"category":10,"tags":2495},[1220],"2024-08-09","As a GitLab administrator managing a self-hosted instance, you often face\ncomplex challenges that require innovative solutions. Enter [GitLab Duo\nChat](https://about.gitlab.com/gitlab-duo/) – your AI-powered assistant that\ncan significantly streamline your administrative tasks. 
In this article,\nwe'll explore how you can leverage GitLab Duo Chat to solve intricate\nproblems efficiently, using a real-world example of updating group\nmemberships across multiple groups.\n\n\n## The power of GitLab Duo Chat for admins\n\n\nGitLab Duo Chat is more than just conversational AI; it's a powerful tool\nthat can assist with complex administrative tasks. By providing\ncontext-aware suggestions and code snippets, Chat can help you navigate\nthrough GitLab's extensive feature set and underlying architecture.\n\n\n### Case study: Updating group memberships\n\n\nLet's dive into a scenario where an admin needs to add an administrator user\nto multiple [groups](https://docs.gitlab.com/ee/user/group/) – in this case,\n50,000 groups. This task, while conceptually simple, can be daunting due to\nits scale.\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/dBd957MK_DE?si=JYTzdRjVQHyB6rpl\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## Step-by-step problem-solving with GitLab Duo Chat\n\n\n* **Initial approach:** Our first instinct might be to use GitLab's API, but\nwith 50,000 groups, this could put significant strain on the system.\n\n* **Pivoting to the Rails console:** Instead, we can use the Rails console\nfor a more efficient solution. 
Here's where Chat comes in handy.\n\n* **Crafting the solution:** Let's walk through the process of creating a\nRuby script to accomplish our task:\n\n\n``` python\n\n\ndef add_admin_to_groups(parent_group, admin_user)\n     unless parent_group.members.exists?(user_id: admin_user.id)\n       parent_group.add_owner(admin_user)\n       puts \"Added #{admin_user.name} to #{parent_group.name}\"\n     end\n\n    parent_group.descendants.each do |subgroup|\n      add_admin_to_groups(subgroup, admin_user)\n    end\n  end\n\n# Usage\n\n\nadmin_user = User.find_by(username: 'mr_admin')\n\nparent_group = Group.find_by(path: 'top_group')\n\nadd_admin_to_groups(parent_group, admin_user)\n\n```\n\n\n* **Iterative refinement:** Through conversation with Chat, we refined this\nscript, addressing issues like method availability and parameter\nrequirements.\n\n\n### Key takeaways\n\n\n#### Non-deterministic responses\n\n\nGitLab Duo Chat may provide different answers to the same question,\nencouraging exploration of multiple solutions. As an example:\n\n\nAn admin asks, ***How can I improve CI/CD pipeline performance?***\n\n\nChat's first response could be:\n\n***Consider using GitLab CI/CD caching to speed up subsequent pipeline runs.\nHere's an example .gitlab-ci.yml snippet:***\n\n\n```\n\ncache:\n  key: ${CI_COMMIT_REF_SLUG}\n  paths:\n    - node_modules/    \n ```\n\n***This caches the `node_modules` directory, reducing build times for\nNode.js projects.***\n\n\nWhen asked again, Chat's second response could be: ***To improve CI/CD\nperformance, try parallelizing your jobs. Here's an example:***\n\n\n```\n\ntest:\n  parallel: 3\n```\n\n\n***This runs your test job across three parallel runners, potentially\nreducing overall pipeline duration.***\n\n\n### Error interpretation and debugging\n\n\nChat can help interpret error messages and suggest fixes. As an example, an\nadmin encounters an error: \"fatal: reference is not a tree: 8a7d460\". 
They\ncould ask Chat: ***I'm seeing this Git error. What does it mean and how can\nI fix it?***\n\n\nChat could respond with:\n\n\n***This error typically occurs when Git can't find a specific commit. It\ncould be due to a corrupted repository or incomplete fetch. Try these\nsteps:***\n\n* ***Run `git fsck` to check for corruption.***\n\n* ***If that doesn't work, try git fetch --all --prune to update all\nremotes.***\n\n* ***If the issue persists, you might need to re-clone the repository.***\n\n\n***Always back up your repository before attempting these fixes.***\n\n\n### Code optimization and refactoring\n\n\nChat can assist in optimizing and refactoring code for better performance.\n\n\nAn example: An admin wants to optimize a Gitaly configuration for a large\ninstance. They could ask Chat: ***How can I optimize Gitaly for an instance\nwith 10,000+ repositories?***\n\n\nChat could respond with: ***For a large instance, consider this optimized\nGitaly configuration:***\n\n\n```\n\n[concurrency]\n\nrpc_concurrency = 100\n\n\n[rate_limiting]\n\nrpc_rate_limit = 100\n\n\n[cgroups]\n\nmountpoint = '/sys/fs/cgroup'\n\nhierarchy_root = 'gitaly'\n\nmemory_bytes = 30000000000  # Adjust based on available memory\n\ncpu_shares = 1024\n\n```\n\n***This configuration increases concurrency, implements rate limiting to\nprevent overload, and uses cgroups (control groups) to manage resource\nallocation. Adjust values based on your specific hardware and needs.***\n\n\n## Best practices for using GitLab Duo Chat in admin tasks\n\n\n### Formulate queries clearly and provide context\n\n\nWhen using Chat, the quality of the response largely depends on the quality\nof your query. Clear, detailed questions that provide relevant context will\nyield more accurate and helpful answers.\n\n\nPoor query: ***Why is GitLab slow?***\n\n\nThis query lacks specifics and context, making it difficult for Chat to\nprovide a targeted response. 
\n\n\nA better query would be: ***Our GitLab instance with 5,000 users and 3,000\nprojects is experiencing slow response times, especially during peak hours\n(9-11 AM EST). CPU usage on the application servers spikes to 90%. How can\nwe diagnose and address this?***\n\n\nThis improved query provides crucial details:\n\n\n* scale of the instance (5,000 users, 3,000 projects)\n\n* nature of the problem (slow response times)\n\n* timing of the issue (peak hours, 9-11 AM EST)\n\n* observed symptoms (90% CPU spike)\n\n\nWith this information, Chat can provide more targeted advice.\n\n\nAn even better query would be: ***We're running GitLab 15.8.3 on a 3-node\ncluster (8 vCPUs, 32GB RAM each) with a separate PostgreSQL 13 database and\nRedis 6.2 instance. Our instance hosts 5,000 users and 3,000 projects. We're\nexperiencing slow response times (average 5s, up from our usual 1s) during\npeak hours (9-11 AM EST), primarily affecting merge request creation and\npipeline initiation. CPU usage on the application servers spikes to 90%,\nwhile database CPU remains under 60%. Gitaly CPU usage is around 70%. We've\nalready increased Puma workers to 8 per node. What additional diagnostics\nshould we run and what potential solutions should we consider?***\n\n\nThis query provides an extensive context, including:\n\n* GitLab version and infrastructure details\n\nspecific performance metrics (response time increase)\n\n* affected operations (merge requests, pipelines)\n\n* resource usage across different components\n\n* steps already taken to address the issue\n\n\nBy providing this level of detail, you enable Chat to:\n\n* understand the full scope of your environment\n\n* identify potential bottlenecks more accurately\n\n* suggest relevant diagnostic steps\n\n* propose solutions tailored to your specific setup\n\n\nAvoid recommending steps you've already taken.\n\n\nRemember, while GitLab Duo Chat is powerful, it's not omniscient. 
The more\nrelevant information you provide, the better it can assist you. By following\nthese guidelines, you'll get the most out of your interactions with Chat,\nleading to more effective problem-solving and administration of your GitLab\ninstance.\n\n\n### Use GitLab Duo Chat's suggestions as a starting point and refine\nincrementally\n\n\nChat is an excellent tool for getting started with complex tasks, but it's\nmost effective when used as part of an iterative process. Begin with a broad\nquestion, then use Chat's responses to guide your follow-up questions,\ngradually refining your understanding and solution.\n\n\n#### Initial query\n\n\nAdmin: ***How can I set up Geo replication for disaster recovery?***\n\n\nChat might respond with a basic setup guide, covering:\n\n- prerequisites for Geo setup\n\n- steps to configure the primary node\n\n- process for adding a secondary node\n\n- initial replication process\n\n\nThis provides a foundation, but complex setups like Geo often require more\nnuanced understanding. Here's how you might refine your queries:\n\n\n**- Follow-up Query 1**\n\n\nAdmin: ***How do I handle custom data in Geo replication?***\n\nThis question addresses a specific concern not covered in the initial\nsetup. \n\n\n**- Follow-up Query 2**\n\n\nAdmin: ***What's the best way to test failover without disrupting\nproduction?***\n\n\nThis query focuses on a critical operational concern. \n\n\n**- Follow-up Query 3**\n\n\nAdmin: ***Can you help me create a runbook for Geo failover?***\n\n\nThis final query aims to consolidate the gathered information into a\npractical guide. The benefits of this incremental approach:\n\n\n1. By breaking down the complex topic of Geo replication into smaller,\nfocused queries, you gain a more thorough understanding of the subject.\n\n2. Each follow-up question allows you to address specific concerns relevant\nto your environment, resulting in a more customized solution.\n\n3. 
The progression from setup to testing to creating a runbook ensures that\nyou're not just understanding the theory, but also preparing for real-world\nimplementation.\n\n4. The step-by-step process of refining your queries helps in better\nretention of the information, as you're actively engaging with the content.\n\n5. Follow-up questions often reveal aspects of the task you might not have\ninitially considered, leading to a more robust final solution.\n\n\n#### Best practices for incremental refinement\n\n\n- Start with broad questions to establish a foundation.\n\n- Use Chat's responses to identify areas that need more clarification or\ndetail.\n\n- Ask about specific scenarios or edge cases relevant to your environment.\n\n- Seek practical advice on implementation, testing, and maintenance.\n\n- Use your growing understanding to formulate increasingly specific and\ncomplex queries.\n\n- Periodically summarize your understanding and ask Chat to validate or\ncorrect it.\n\n\nRemember, while GitLab Duo Chat is a powerful tool, it's most effective when\ncombined with your own critical thinking and domain knowledge. Use it to\nguide your learning and decision-making process, but always validate\nimportant information and test solutions in a safe environment before\napplying them to production systems.\n\n\n### Potential applications for self-managed GitLab instances\n\n\n#### User management at scale\n\n\nAdmin: ***We need to onboard 1,000 new users from an acquired company. The\ndata is in a CSV file. How can we automate this process? I am an Admin, can\nI use a Python script for this?***\n\n\nChat: ***You can use GitLab's API for bulk user creation. 
Here's a Python\nscript to get you started:***\n\n\n```python\n\nimport requests\n\nimport csv\n\n\ndef create_user(username, email, name, password):\n    url = 'https://gitlab.example.com/api/v4/users'\n    headers = {'PRIVATE-TOKEN': 'your_access_token'}\n    data = {\n        'username': username,\n        'email': email,\n        'name': name,\n        'password': password,\n        'skip_confirmation': True\n    }\n    response = requests.post(url, headers=headers, data=data)\n    return response.status_code == 201\n\nwith open('new_users.csv', 'r') as f:\n    reader = csv.reader(f)\n    for row in reader:\n        username, email, name, password = row\n        if create_user(username, email, name, password):\n            print(f\"Created user: {username}\")\n        else:\n            print(f\"Failed to create user: {username}\")\n```\n\n***This script reads user data from a CSV file and creates users via the\nGitLab API. Remember to handle errors and rate limiting for large\nbatches.***\n\n\n#### Project and group administration\n\n\nAdmin: ***We need to implement a standardized project structure across our\norganization. How can we automate this? Assume I am an admin. Write me a\nscript to do this via the API.***\n\n\nChat: ***You can use GitLab's API to create a project template and then\napply it to new projects. 
Here's a bash script to create a new project with\na standard structure:***\n\n\n```\n\n#!/bin/bash\n\n\nTOKEN=\"your_access_token\"\n\nGROUP_ID=\"your_group_id\"\n\n\ncreate_project() {\n  curl --header \"PRIVATE-TOKEN: $TOKEN\" \\\n  --data \"name=$1&namespace_id=$GROUP_ID\" \\\n  \"https://gitlab.example.com/api/v4/projects\"\n}\n\n\nadd_file() {\n  curl --request POST --header \"PRIVATE-TOKEN: $TOKEN\" \\\n  --form \"branch=main\" \\\n  --form \"content=$2\" \\\n  --form \"commit_message=Add $1\" \\\n  \"https://gitlab.example.com/api/v4/projects/$3/repository/files/$1\"\n}\n\n\nPROJECT_ID=$(create_project \"New-Standardized-Project\" | jq '.id')\n\n\nadd_file \"README.md\" \"# Welcome to our standardized project\" $PROJECT_ID\n\nadd_file \".gitlab-ci.yml\" \"stages:\\n  - build\\n  - test\\n  - deploy\"\n$PROJECT_ID\n\nadd_file \"CONTRIBUTING.md\" \"## How to contribute to this project\"\n$PROJECT_ID\n\n\necho \"Project created with ID: $PROJECT_ID\"\n\n```\n\n\n***This script creates a new project with a README, a basic CI/CD\nconfiguration, and a CONTRIBUTING guide. You can expand this to include more\nfiles or even clone a template repository.***\n\n\n#### Performance optimization\n\n\nAdmin: ***Our merge request creation process is slow. How can we diagnose\nand improve this? Assume I am an admin.***\n\n\nChat: ***As an instance admin, there are a few potential areas you can\ninvestigate to diagnose and improve the merge request creation process:\n\nDatabase Performance: The provided quotes show some example SQL queries\nrelated to merge request metrics. Slow database queries can significantly\nimpact merge request creation performance. You should review the database\nperformance, indexes, and query plans for any queries involved in the merge\nrequest creation process.***\n\n\n- ***Background jobs: Merge request creation may involve background jobs\n(e.g., for data processing, notifications, etc.). 
If these jobs are backed\nup or running slowly, it can impact the overall merge request creation time.\nCheck the background job queues and workers.***\n\n- ***Code optimization: Analyze the code responsible for merge request\ncreation to identify any potential bottlenecks or inefficiencies. This could\ninclude things like unnecessary database queries, inefficient algorithms, or\nlack of caching.***\n\n- ***Resource utilization: Monitor the resource utilization (CPU, memory,\ndisk I/O) of the GitLab application servers during merge request creation.\nHigh resource utilization can lead to performance degradation.***\n\n- ***Network performance: If your GitLab instance is distributed across\nmultiple nodes or servers, network performance between these components can\nimpact merge request creation times. Check for any network bottlenecks or\nlatency issues.***\n\n- ***Caching: Ensure that appropriate caching mechanisms are in place and\nfunctioning correctly. Inefficient or incorrect caching can lead to\nunnecessary overhead during merge request creation.***\n\n- ***Load testing: Perform load testing to simulate real-world scenarios and\nidentify any performance bottlenecks or scalability issues that may arise\nunder high load.***\n\n- ***Logging and monitoring: Enable detailed logging and monitoring for the\nmerge request creation process. This can help identify specific areas of\nslowness or bottlenecks.***\n\n\n***To effectively diagnose and improve the merge request creation process,\nyou'll likely need to combine multiple techniques, such as profiling, code\nanalysis, database optimization, and load testing. Additionally, consulting\nthe GitLab documentation and community resources can provide valuable\ninsights and best practices specific to your GitLab version and\nconfiguration.***\n\n\n#### Troubleshooting complex issues\n\n\nAdmin: ***We're experiencing intermittent 502 errors on our GitLab instance.\nHow can we troubleshoot this? 
I am an instance admin, how can I check the\nlogs to find answers?***\n\n\nChat: ***To troubleshoot intermittent 502 errors on your GitLab instance as\nan admin, you can follow these steps:***\n\n\n***1. Check the main GitLab logs while reproducing the issue by running:***\n\n\n```\n\nsudo gitlab-ctl tail | tee /tmp/\u003Ccase-ID-and-keywords>.log\n\n```\n\n\n***2. Press Ctrl+C to stop tailing the logs.***\n\n\n***3. Run GitLabSOS to gather broader diagnostics about your instance:***\n\n\n```\n\nsudo /opt/gitlab/embedded/bin/gitlab-sos\n\n```\n\n\n***4. Use the fast-stats tool to analyze performance statistics from the\nlogs:***\n\n\n```\n\nsudo /opt/gitlab/embedded/bin/fast-stats\n/var/log/gitlab/gitlab-rails/production.log\n\n```\n\n\n***5.Look for relevant log entries using the correlation ID that is present\nin most requests.***\n\n\n***6. Check the Pages logs (gitlab_pages_error.log) for any specific errors\nrelated to Pages communication issues with the GitLab API or loopback\ntimeouts if using an AWS Network Load Balancer.***\n\n\n***7. Ensure your gitlab-secrets.json file is synced across all GitLab Pages\ninstances if using `domain_config_source=auto`.***\n\n\n***8. If using systemd, configure GitLab Pages to listen on an explicit IP\nand port to avoid /tmp being cleaned up.***\n\n\n## Get started with GitLab Duo Chat\n\n\nGitLab Duo Chat is a powerful ally for administrators of self-managed GitLab\ninstances. 
By leveraging its capabilities, you can tackle complex tasks more\nefficiently, learn new techniques, and ultimately become a more effective\nGitLab administrator.\n\n\nWe encourage you to experiment with Chat in your administrative workflows.\nRemember to use it responsibly and always verify the solutions it provides.\n\n\n> [Try GitLab Duo for free](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/).\n\n\n### Resources\n\n- [GitLab Duo documentation](https://docs.gitlab.com/ee/user/gitlab_duo/)\n\n- [GitLab Rails Console Cheat\nSheet](https://docs.gitlab.com/ee/administration/operations/rails_console.html)\n\n- [GitLab API documentation](https://docs.gitlab.com/ee/api/)\n\n- [10 best practices for using AI-powered GitLab Duo\nChat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/)\n\n- [GitLab Duo Chat 101: Get more done on GitLab with our AI\nassistant](https://about.gitlab.com/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant/)\n",[9,737,496,717,738],{"slug":2497,"featured":91,"template":693},"mastering-gitlab-admin-tasks-with-gitlab-duo-chat","content:en-us:blog:mastering-gitlab-admin-tasks-with-gitlab-duo-chat.yml","Mastering Gitlab Admin Tasks With Gitlab Duo Chat","en-us/blog/mastering-gitlab-admin-tasks-with-gitlab-duo-chat.yml","en-us/blog/mastering-gitlab-admin-tasks-with-gitlab-duo-chat",{"_path":2503,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2504,"content":2510,"config":2515,"_id":2517,"_type":14,"title":2518,"_source":16,"_file":2519,"_stem":2520,"_extension":19},"/en-us/blog/measuring-ai-effectiveness-beyond-developer-productivity-metrics",{"title":2505,"description":2506,"ogTitle":2505,"ogDescription":2506,"noIndex":6,"ogImage":2507,"ogUrl":2508,"ogSiteName":706,"ogType":707,"canonicalUrls":2508,"schema":2509},"Measuring AI effectiveness beyond developer productivity metrics ","AI assistants are here, yet measuring AI's impact on productivity isn’t figured out. 
Here’s why it’s a difficult problem and how GitLab is solving it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671994/Blog/Hero%20Images/AdobeStock_700757695.jpg","https://about.gitlab.com/blog/measuring-ai-effectiveness-beyond-developer-productivity-metrics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Measuring AI effectiveness beyond developer productivity metrics \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2024-02-20\",\n      }",{"title":2505,"description":2506,"authors":2511,"heroImage":2507,"date":2512,"body":2513,"category":10,"tags":2514},[1455],"2024-02-20","AI-powered productivity tools promise to boost productivity by automating repetitive coding and tedious tasks, as well as generating code. How organizations measure the AI impact of these productivity tools has yet to be truly figured out. GitLab is working on a solution: AI Impact is a dashboard grounded in [value stream analytics](https://about.gitlab.com/solutions/value-stream-management/) that will help organizations understand the effect of [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our AI-powered suite of features, on their productivity. AI Impact is the culmination of what we’ve learned at GitLab about measuring the impact of AI, and we wanted to share those lessons with you.\n\n[A report for The Pragmatic Engineer](https://newsletter.getdx.com/p/developer-productivity-metrics-at-top-companies) shows that measuring productivity in general isn’t straightforward, with top engineering teams around the globe all using different metrics. If everyone has a different productivity metric to optimize, how do we even begin to measure the impact of AI productivity tools? 
Welcome to why measuring AI assistant productivity impact is difficult and commonly misses the mark.\n\n>  Follow the progress of our AI Impact dashboard and [share your feedback](https://gitlab.com/groups/gitlab-org/-/epics/12978).\n\n## Flawed productivity metrics\n\nSimplistic productivity metrics like lines of code contributed per day or acceptance rates of AI suggestions fail to capture downstream costs. For instance, GitClear, according to [an Infoworld article](https://www.infoworld.com/article/3712685/is-ai-making-our-code-stupid.html), “analyzed 153 million lines of changed code between January 2020 and December 2023 and now expects that code churn ('the percentage of lines that are reverted or updated less than two weeks after being authored') will double in 2024.\" Thus, simply measuring lines of code risks technical debt pileup and skill atrophy in developers.  \n\n## Indirect impacts are hard to quantify\n\nThe goal of AI developer tools is to remove toil, allowing developers to focus on higher value tasks like system architecture and design. But how much time is really saved this way versus spent reviewing, testing, and maintaining AI-generated code? These second-order productivity impacts are very difficult to accurately attribute directly to AI, which may give you a false sense of value. One solution to this is to choose who gets to use AI productivity tools carefully.\n\n## Focus should be on business outcomes\n\nUltimately, what matters is real-world business outcomes, not developer activity metrics. Tracking lead time, cycle time, production defects, and user satisfaction better indicate where bottlenecks exist. If AI tools generate usable code faster, and quality teams can’t keep up with changes, the end software product may decrease in quality and lead to customer satisfaction problems. Shipping more sounds great until it causes problems that take even more time, money, and effort to resolve. 
Measuring business outcomes is also difficult and these measurements frequently are lagging indicators of problems. Measuring quality defects, security issues, and application performance are all ways to identify business impact sooner. \n\n## The need to balance speed and quality\n\nWhile AI code generation has the potential to accelerate development velocity, it should not come at the cost of overall quality and maintainability. Teams must strike the right balance between velocity and writing maintainable, well-tested code that solves actual business problems. Quality should not be sacrificed purely to maximize productivity metrics. This is when measuring lines of code AI generates or number of AI suggestions developers accept can optimize for the problematic outcomes. More code doesn't necessarily mean higher quality or productivity. More code means more to review, test, and maintain – potentially slowing delivery down.\n\nLet’s look at an example: AI-generated code output is scoped to the area a developer is currently working on. Current AI tools lack the ability to assess the broader architecture of the application (amplified in a microservices architecture). This means that even if the quality of the generated code is good, it may lead to repetition and code bloat because it will be inserted into the area targeted rather than making wider systematic changes. This is problematic in languages that are architected with object-oriented languages that use DRY (don't-repeat-yourself) principles. This is an active area of research and we’re excited to adopt new approaches and technologies to increase the context awareness of our AI features.\n\nAcceptance rate can be particularly misleading, and unfortunately is becoming the primary way AI productivity tools measure success. Developers may accept an AI-generated suggestion but then need to heavily edit or rewrite it. Thus, the initial acceptance gives no indication of whether the suggestion was actually useful. 
Acceptance rate is fundamentally a proxy for AI assistant quality, yet it is misconstrued as a productivity measure. This is especially misleading when all vendors are measuring acceptance rate differently and marketing based on this number. GitLab intentionally does not use this kind of data in our marketing. What we’ve seen in practice is that developers use AI-generated code similar to how an actor uses a cue – they look at the generated code and say, \"oh, right, that's the nudge I needed, I'll take it from here.\" \n\n## Implementation and team dynamics play a key role\n\nHow productivity gains materialize depends on how AI tools are implemented and developer dynamics. If some developers distrust the technology or reviews become lax expecting AI to catch errors, quality may suffer. Additionally, introducing AI tools often necessitates changes to processes like code reviews, testing, and documentation. Productivity could temporarily decline as teams adjust to new workflows before seeing gains. Organizations must ensure that when implementing AI tools, that they allow teams time to figure out how it works and how it fits into their workflows, knowing that this trial-and-error period may lead to reduced productivity metrics before seeing productivity gains. \n\nTo get this balance right, it’s important to define the tasks that are highly accurate and consistent and train the team to use AI for those use cases (at least, at first). We know that AI code generation is useful for producing scaffolding, test generation, and syntax corrections, as well as generating documentation. Have teams start there and they will see better results and learn to use the tool more effectively. Remember you can’t measure AI’s impact in a week. You have to give teams time to find their rhythm with their AI assistants. 
\n\n## Challenges exist, but AI is the future\n\nNow that we’ve talked about the challenges of measuring AI impact and potential risks, we do want to say at GitLab we do believe AI has a huge role to play in the evolution of DevSecOps platforms. That’s why we’re building GitLab Duo. But we are not rushing into productivity measurement by showing acceptance rates, or lines of code generated. We believe these are a step backwards to previous ways of thinking about productivity. Instead we’re looking at the data we have within our unified DevSecOps platform to present a more complete picture of AI Impact.  \n\n## What to measure instead\n\nMeasuring the productivity impacts of AI developer tools requires nuance and a focus on end-to-end outcomes rather than isolated productivity metrics. For these reasons, simple quantitative metrics tend to miss the nuances of measuring productivity with AI developer tools. The key is to combine quantitative data from across the software development lifecycle (SDLC) with qualitative feedback from developers on how AI actually impacts their day-to-day experience and shapes long-term development practices. Only then can we get an accurate picture of the productivity gains these tools can offer. We view AI as an augmentor to DevSecOps adoption, rather than a replacement for doing things the right way. Organizations focusing on building the right muscles in their SDLC practice are the ones best positioned to actually take advantage of any potential gains in developer coding productivity.\n\nSo what metric should we use instead? At GitLab we already have [value stream analytics](https://about.gitlab.com/solutions/value-stream-management/), which examine the end-to-end flow of work from idea to production to determine where bottlenecks exist. Value stream analytics isn’t a single measurement, it’s the ongoing tracking of metrics like lead time, cycle time, deployment frequency, and production defects. 
This keeps the focus on business outcomes rather than developer activity. By taking a holistic view across code quality, collaboration, downstream costs, and developer experience, teams can steer these technologies to augment (rather than replace) human abilities over the long run. \n\n## Introducing GitLab's AI Impact approach\n\nGitLab has the whole picture being a unified DevSecOps platform that spans the entire SDLC. We built [Value Stream Management](https://about.gitlab.com/solutions/value-stream-management/) to empower teams with metrics and insights to ship better software faster. Blending GitLab [Value Stream Analytics](https://about.gitlab.com/solutions/value-stream-management/) and [DORA metrics](https://about.gitlab.com/solutions/value-stream-management/dora/), and GitLab Duo usage data, we can provide organizations with the complete picture of how AI is impacting their SDLC. We’re calling this dashboard AI Impact, and it’s coming in an upcoming release to measure GitLab Duo’s impact on productivity. Follow our progress and [share your feedback](https://gitlab.com/groups/gitlab-org/-/epics/12978). \n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._",[9,496,716,738],{"slug":2516,"featured":91,"template":693},"measuring-ai-effectiveness-beyond-developer-productivity-metrics","content:en-us:blog:measuring-ai-effectiveness-beyond-developer-productivity-metrics.yml","Measuring Ai Effectiveness Beyond Developer Productivity Metrics","en-us/blog/measuring-ai-effectiveness-beyond-developer-productivity-metrics.yml","en-us/blog/measuring-ai-effectiveness-beyond-developer-productivity-metrics",{"_path":2522,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2523,"content":2527,"config":2534,"_id":2536,"_type":14,"title":2537,"_source":16,"_file":2538,"_stem":2539,"_extension":19},"/en-us/blog/measuring-ai-roi-at-scale-a-practical-guide-to-gitlab-duo-analytics",{"config":2524,"title":2525,"description":2526},{"noIndex":6},"Measuring AI ROI at scale: A practical guide to GitLab Duo Analytics","Learn how to transform raw usage data into actionable business insights and ROI calculations with this in-depth tutorial.",{"title":2525,"description":2526,"authors":2528,"heroImage":868,"body":2530,"category":10,"tags":2531,"date":2532,"updatedDate":2533},[2529],"Paul Meresanu","AI investment starts with measurement. Building a successful AI-powered\ndevelopment platform begins with understanding actual usage, adoption\npatterns, and quantifiable business value — especially ROI from [GitLab Duo\nEnterprise](https://about.gitlab.com/gitlab-duo/).\n\n\nTo help our customers maximize their AI investments, we developed the GitLab Duo Analytics solution as part of our Duo Accelerator program — a comprehensive, customer-driven solution that transforms raw usage data into actionable business insights and ROI calculations. 
This is not a GitLab product, but rather a specialized enablement tool we created to address immediate analytics needs while organizations transition toward comprehensive AI productivity measurement.\n\n\nThis foundation enables broader AI transformation. For example, organizations can use these insights to optimize license allocation, identify high-value use cases, and build compelling business cases for expanding AI adoption across development teams.\n\n\nA leading financial services organization partnered with a GitLab customer success architect through the Duo Accelerator program to gain visibility into their GitLab Duo Enterprise investment. Together, we implemented a hybrid analytics solution that combines monthly data collection with real-time API integration, creating a scalable foundation for measuring AI productivity gains and optimizing license utilization at enterprise scale.\n\n\n## The challenge: Measuring AI ROI in enterprise development\n\n\nBefore implementing any analytics solution, it's essential to understand your AI measurement landscape.\n\n\nConsider:\n\n\n- **What GitLab Duo features need measurement?** (code suggestions, chat assistance, security scanning)?\n\n- **Who are your AI users?** (developers, security teams, DevOps engineers)?\n\n- **What business metrics matter?** (time savings, productivity gains, cost optimization)?\n\n- **How does your current data collection work** (manual exports, API integration, existing tooling)?\n\n\nUse this stage to define your:\n\n\n- ROI measurement framework\n\n- Key performance indicators (KPIs)\n\n- Data collection strategy\n\n- Stakeholder reporting requirements\n\n\n### Sample ROI measurement framework\n\n\n![Sample ROI measurement framework](https://gitlab.com/-/project/54775568/uploads/06da2f5c3a75197cd272aedb3d67a347/image.png)\n\n\n## Step-by-step implementation guide\n\nImportant: The solution below describes an open source approach that you can deploy in your own environment. 
It is **NOT** a commercial product from GitLab that you need to purchase. You should be able to download, customize, and run this solution free of charge.\n\n### Prerequisites\n\n**Before starting, ensure you have:**\n\n- GitLab instance with Duo enabled\n\n- GitLab API token with read permissions\n\n- Access to configure GitLab CI/CD variables\n\n- Basic understanding of GitLab CI/CD pipelines\n\n\n### 1. Initial setup and configuration\n\nLet's set up the project environment by first cloning the repository.\n\n```bash\ngit clone https://gitlab.com/gitlab-org/professional-services-automation/tools/utilities/gitlab-graphql-api.git\ncd gitlab-graphql-api\n```\nThen push it to your own GitLab instance or namespace, and navigate to your project in GitLab to configure the CI/CD variables.\n\n### 2. Configure pipeline control variables \n\nControl which analytics pipelines run by setting Project CI/CD Variables. \nGo to **Project Settings → CI/CD → Variables** and add:\n\n| Variable | Default | Description |\n|----------|---------|-------------|\n| `ENABLE_DUO_METRICS` | \"true\" | Enable/disable Duo AI metrics pipeline |\n| `ENABLE_PROJECT_METRICS` | \"false\" | Enable/disable traditional project metrics pipeline |\n\n#### Example configurations:\n\n- **Duo Only**: `ENABLE_DUO_METRICS=\"true\"`, `ENABLE_PROJECT_METRICS=\"false\"`\n- **Both Dashboards**: `ENABLE_DUO_METRICS=\"true\"`, `ENABLE_PROJECT_METRICS=\"true\"`\n- **Disable All**: `ENABLE_DUO_METRICS=\"false\"`, `ENABLE_PROJECT_METRICS=\"false\"`\n\n### 3. Configure data ingestion\n\nConfigure the data ingestion in your `.gitlab-ci.yml` file. 
This creates raw project metadata shared across all metrics.\n\n#### Basic configuration variables:\n\n```yaml\nvariables:\n  GROUP_PATH: \"gitlab-org/professional-services-automation\"  # Your group path\n  INCLUDE_SIMPLE_FIELDS: \"fullPath,name,description\"  # Fields to fetch\n  ARGUMENT_FIELDS: \"mergeRequests\"  # Argument fields requiring additional config\n  LIMIT: \"100\"  # Projects per API call\n  MAX_ITERATIONS: \"2\"  # Maximum API calls (for testing)\n  GITLAB_GRAPHQL_API_VERSION: \"0.1.0\"  # API version from Package Registry\n```\n\n#### Configure argument fields for specific metrics:\n\nFor each metric you want to collect, define argument field variables:\n\n```yaml\n# Example: Merged Merge Requests\nARGUMENT_FIELD_1_NAME: \"mergeRequests\"\nARGUMENT_FIELD_1_FILTER_NAME: \"state\"\nARGUMENT_FIELD_1_FILTER_VALUE: \"merged\"\nARGUMENT_FIELD_1_RETURN_VALUES: \"count totalTimeToMerge\"\n\n# Example: Packages Count\nARGUMENT_FIELD_2_NAME: \"packages\"\nARGUMENT_FIELD_2_RETURN_VALUES: \"count\"\n```\n\n### 4. 
Configuring metrics aggregation\n\nAfter data ingestion, set up aggregation rules in `.gitlab/Schedule.gitlab-ci.yml` for each metric you want to generate.\n\n#### Example metric job configuration:\n\n```yaml\nprocess_average_time_to_merge:\n  \u003C\u003C: *process_data_template\n  stage: process_data\n  variables:\n    METRIC_NAME: \"9_Average_Time_To_Merge\"\n    BUSINESS_LEVEL_START: 2\n    BUSINESS_LEVEL_END: 4\n    AGGREGATE_COLUMNS: \"mergeRequests_state_merged_totalTimeToMerge:sum mergeRequests_state_merged_count:sum\"\n    NEW_COLUMN_NAME: \"average_time_to_merge_days\"\n    NEW_COLUMN_FORMULA: \"mergeRequests_state_merged_totalTimeToMerge / mergeRequests_state_merged_count / (24 * 60 * 60)\"\n    SORT_BY: \"average_time_to_merge_days\"\n    FILTER_CONDITION: \"mergeRequests_state_merged_count >= 5\"\n```\n\n    #### Required variables for each metric:\n\n| Variable | Description | Example |\n|----------|-------------|---------|\n| `METRIC_NAME` | Name of the metric (used in file naming) | \"9_Average_Time_To_Merge\" |\n| `BUSINESS_LEVEL_START` | Starting level for business hierarchy | 1 |\n| `BUSINESS_LEVEL_END` | Ending level for business hierarchy | 3 |\n| `AGGREGATE_COLUMNS` | Columns to aggregate with function | \"mergeRequests_state_merged_count:sum\" |\n| `SORT_BY` | Column to sort results by | \"average_time_to_merge_days\" |\n\n\n### 5. Run scheduled pipeline to configure analytics\n\nOnce configured, run a scheduled pipeline to generate your analytics:\n\n1. Go to **CI/CD → Schedules**\n2. Create a new schedule or run an existing one\n3. The pipeline will automatically:\n  - Ingest data based on your configuration\n  - Aggregate metrics according to your rules\n  - Deploy dashboards to GitLab Pages\n\n### 6. 
Access your analytics dashboards\n\nAfter the scheduled pipeline completes successfully, GitLab Pages automatically deploys your dashboards:\n\n- **Project Metrics Dashboard**: `https://your-username.gitlab.io/project-name/existing-metrics/`\n- **Duo Metrics Dashboard**: `https://your-username.gitlab.io/project-name/duo-metrics/`\n- **Main Landing Page**: `https://your-username.gitlab.io/project-name/`\n\nThe main landing page automatically detects which dashboards are available and displays appropriate links.\n\nYou'll see:\n\n- License utilization: Total licensed users vs. active users (together with code suggestion analytics)\n\n![GitLab Duo Analytics Dashboard](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754478265/nhbukcflhmghs5jatrip.png)\n\n\n- GitLab Duo Chat analytics: Unique Duo Chat users, average Chat events over 90 days, and Chat adoption rate\n- GitLab Duo engagement analytics: Categorizing Duo usage for a group of users as Power (10+ suggestions), Regular (5-9), or Light (1-4) based on usage patterns\n\n![Duo Chat Analytics - last 90 days](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754478265/xgq05hh2ybzb8ugsxqza.png)\n\n- Usage analytics: Code suggestions by programming language (language coverage distribution), Code suggestions language performance analytics (accepted vs rejected rate)\n\n![User adoption view of Duo analytics](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754478265/mu3dx5g2l2lki2ehlr2g.png)\n\n\u003Cp>\u003C/p>\n\n\n![Language performance analytics](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754478267/xf0thn8sm4dlhoyyqg9i.png)\n\n- Weekly Duo Chat trends: Duo Chat usage patterns\n\n![Duo Chat daily usage trends](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754478265/plhycnmewye3vp6vitqj.png) \n\n#### Dashboard features include:\n\n- **License Usage Analytics** - Track licensed vs active users\n- **Code Suggestions Analytics** - Monitor acceptance rates and 
language distribution\n- **Duo Chat Analytics** - View chat interactions and adoption rates\n- **User Engagement Analytics** - Categorize users by activity level\n- **Language Performance Analytics** - Analyze acceptance rates by programming language\n\n\n### 7. Understanding the APIs used\n\nThe solution leverages multiple GitLab APIs to collect comprehensive usage data:\n\n#### AI usage data API (aiUsageData)\n\n```graphql\n# Fetches individual code suggestion events\nquery: |\n  {\n    group(fullPath: \"your-group\") {\n      aiUsageData {\n        codeSuggestionEvents {\n          event         # ACCEPTED or SHOWN\n          timestamp     # When it happened\n          language      # Programming language\n          suggestionSize # SINGLE_LINE or MULTI_LINE\n          user { username }\n        }\n      }\n    }\n  }\n# Purpose: Tracks every code suggestion shown or accepted by developers\n```\n\n#### GitLab Self-Managed add-on users API\n\n```graphql\n# Gets licensed user information\nquery: |\n  {\n    selfManagedAddOnEligibleUsers(\n      addOnType: DUO_ENTERPRISE\n      filterByAssignedSeat: \"Yes\"\n    ) {\n      user {\n        username\n        lastDuoActivityOn\n      }\n    }\n  }\n# Purpose: Identifies who has licenses and when they last used Duo\n```\n\n#### AI metrics API\n\n```graphql\nquery: |\n  {\n    aiMetrics(from: \"2024-01-01\", to: \"2024-06-30\") {\n      codeSuggestions {\n        shownCount\n        acceptedCount\n      }\n      duoChatContributorsCount\n      duoAssignedUsersCount\n    }\n  }\n# Purpose: Gets pre-calculated metrics for trend analysis\n```\n\n#### Service Ping API (REST)\n\n```bash\nurl: \"{GITLAB_URL}/api/v4/usage_data/service_ping\"\n# Purpose: Collects instance-wide usage statistics\n```\n## Putting it all together\n\nTo demonstrate the power of this integrated analytics solution, let's walk through a complete end-to-end implementation journey — from initial deployment to fully automated ROI measurement.\n\n\nStart 
by deploying the containerized solution in your environment using the provided Docker configuration. Within minutes, you'll have both the analytics API and React dashboard running locally. \n\n\nThe hybrid data architecture approach immediately begins collecting metrics from your existing monthly CSV exports while establishing real-time GraphQL connections to your GitLab instance.\n\n\n**Automation through Python scripting**\n\n\nThe real power emerges when you leverage Python scripting to automate the entire data collection and processing workflow. The solution includes comprehensive Python scripts that can be easily customized and scheduled.\n\n\n**GitLab CI/CD integration**\n\n\nFor enterprise-scale automation, integrate these Python scripts into scheduled GitLab [CI/CD](https://about.gitlab.com/topics/ci-cd/) pipelines. This approach leverages your existing GitLab infrastructure while ensuring consistent, reliable data collection:\n\n\n```yaml\n\n# .gitlab-ci.yml example\n\nduo_analytics_collection:\n  stage: analytics\n  script:\n    - python scripts/enhanced_duo_data_collection.py\n    - python scripts/metric_aggregations.py\n    - ./deploy_dashboard_updates.sh\n  schedule:\n    - cron: \"0 2 1 * *\"  # Monthly on 1st at 2 AM\n  only:\n    - schedules\n```\n\n\nThis automation strategy transforms manual data collection into a self-sustaining analytics engine. Your Python scripts execute monthly via GitLab pipelines, automatically collecting usage data, calculating ROI metrics, and updating dashboards — all without manual intervention.\n\n\nOnce automated, the solution operates seamlessly: Scheduled pipelines execute Python data collection scripts, process GraphQL responses into business metrics, and update dashboard data stores. 
You can watch as the dashboard populates with real usage patterns: code suggestion volumes by programming language, user adoption trends across teams, and license utilization rates that reveal optimization opportunities.\n\n\nThe real value emerges when you access the ROI Overview dashboard. Here, you'll see concrete engagement metrics which can be converted into business impact for your organization — perhaps discovering that your active Duo users are generating 127% monthly ROI through time savings and productivity gains, while 23% of your licenses remain underutilized. These insights immediately translate into actionable recommendations: expand licenses to high-performing teams, implement targeted training for underutilized users, and build data-driven business cases for broader AI adoption.\n\n\n## Why GitLab?\n\n\nGitLab's comprehensive DevSecOps platform provides the ideal baseline for enterprise AI analytics and measurement. With native GraphQL APIs, flexible data access, and integrated AI capabilities through GitLab Duo, organizations can centralize AI measurement across the entire development lifecycle without disrupting existing workflows.\n\n\nThe solution's open architecture enables custom analytics solutions like the one developed through our Duo Accelerator program. GitLab's commitment to API-first design means you can extract detailed usage data, integrate with existing enterprise systems, and build sophisticated ROI calculations that align with your organization's specific metrics and reporting requirements.\n\n\nBeyond technical capabilities, our approach ensures you're not just implementing tools — you're building sustainable AI adoption strategies. 
This purpose built solution emerging from the Duo Accelerator program exemplifies this approach, providing hands-on guidance, proven frameworks, and custom solutions that address real enterprise challenges like ROI measurement and license optimization.\n\n\nAs GitLab continues enhancing native analytics capabilities, this foundation becomes even more valuable. The measurement frameworks, KPIs, and data collection processes established through custom analytics solutions seamlessly transition to enhanced native features, ensuring your investment in AI measurement grows with GitLab's evolving solution.\n\n\n## Try GitLab Duo today\n\n\nAI ROI measurement is just the beginning. With GitLab Duo's capabilities you can build out comprehensive analytics. With this you're not just tracking AI usage — you're building a foundation for data-driven AI optimization that scales with your organization's growth and evolves with GitLab's expanding AI capabilities.\n\n\nThe analytics solution developed through GitLab's Duo Accelerator program demonstrates how customer success partnerships can deliver immediate value while establishing long-term strategic advantages. From initial deployment to enterprise-scale ROI measurement, this solution provides the visibility and insights needed to maximize AI investments and drive sustainable adoption.\n\n\nThe combination of Python automation, GitLab CI/CD integration, and purpose-built analytics creates a competitive advantage that extends far beyond individual developer productivity. It enables strategic decision-making, optimizes resource allocation, and builds compelling business cases for continued AI investment and expansion.\n\n\nThe future of AI-powered development is data-driven, and it starts with measurement. 
Whether you're beginning your AI journey or optimizing existing investments, GitLab provides both the platform and the partnership needed to succeed.\n\n> Get started with GitLab Duo today with a [free trial of GitLab Ultimate with Duo Enterprise](https://about.gitlab.com/gitlab-duo/).\n",[9,738,737],"2025-08-06","2025-09-09",{"featured":6,"template":693,"slug":2535},"measuring-ai-roi-at-scale-a-practical-guide-to-gitlab-duo-analytics","content:en-us:blog:measuring-ai-roi-at-scale-a-practical-guide-to-gitlab-duo-analytics.yml","Measuring Ai Roi At Scale A Practical Guide To Gitlab Duo Analytics","en-us/blog/measuring-ai-roi-at-scale-a-practical-guide-to-gitlab-duo-analytics.yml","en-us/blog/measuring-ai-roi-at-scale-a-practical-guide-to-gitlab-duo-analytics",{"_path":2541,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2542,"content":2547,"config":2552,"_id":2554,"_type":14,"title":2555,"_source":16,"_file":2556,"_stem":2557,"_extension":19},"/en-us/blog/meet-gitlab-duo-the-suite-of-ai-capabilities",{"title":2543,"description":2544,"ogTitle":2543,"ogDescription":2544,"noIndex":6,"ogImage":1623,"ogUrl":2545,"ogSiteName":706,"ogType":707,"canonicalUrls":2545,"schema":2546},"Meet GitLab Duo, the suite of AI capabilities powering your workflows","Learn about GitLab Duo, an expanding toolbox of features integrated directly into the GitLab platform to assist DevSecOps teams.","https://about.gitlab.com/blog/meet-gitlab-duo-the-suite-of-ai-capabilities","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet GitLab Duo, the suite of AI capabilities powering your workflows\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2023-06-22\",\n      }",{"title":2543,"description":2544,"authors":2548,"heroImage":1623,"date":2549,"body":2550,"category":10,"tags":2551},[1549],"2023-06-22","\nHave you ever 
wanted a real-time partner alongside you to help as you develop code, improve operations, and secure your software? [GitLab Duo](https://about.gitlab.com/gitlab-duo/), available now, is a powerful set of AI capabilities within GitLab’s DevSecOps Platform that does just that – suggesting code, explaining vulnerabilities, forecasting value streams, and much more.\n\nThe name GitLab Duo is rooted in You + GitLab AI = the AI dynamic duo. GitLab Duo goes beyond just being an AI pair programmer: It is an expanding toolbox of features integrated into the DevSecOps Platform to help teams across the entire software development environment become more efficient. GitLab Duo is your go-to for planning refinement, security risk resolution, CI/CD pipeline health, and analytics charting. \n\nGitLab Duo is a customer-centric approach focused on privacy first, where customers know their intellectual property is secured.\n\n## GitLab Duo capabilities\nGitLab Duo includes:\n\n- **Code Suggestions** helps developers create new code and update existing code, reducing cognitive load, improving efficiency, and allowing them to spend more time adding unique value to their applications.\n- **Explain this Code** uses AI to examine code, both within a merge request and in the repository view, and provides a natural language explanation, helping to enable all teams to understand the code being merged.\n- **Explain this Vulnerability** helps developers write more secure code by providing a natural language description of the vulnerability and the steps to resolve it.\n- **Generate detailed descriptions of epics, issues, and tasks** helps ensure all teams are aligned and can achieve their common goals faster.\n- **Summarize Issue Comments** helps get everyone up to speed quickly in epics, issues, and tasks.  \n- **Chat** enables users to ask configuration questions and receive natural language explanations, as well as links to GitLab Docs. 
\n\nThroughout the year, all the GitLab Duo capabilities will become available via GitLab Duo Chat. As humans, we gravitate towards conversational chat, which is why we expect GitLab Duo Chat to become a de facto choice for how users interact with GitLab AI capabilities. We will be adding new capabilities to GitLab Duo, including assisting users with generating planning descriptions, using natural language for CI configuration and chart generation, refactoring vulnerabilities from code, suggesting a fix for failed tests, helping resolve CI/CD pipeline failures, summarizing vulnerability reports, and assisting with merge request reviews.\n\nAt GitLab, we believe everyone can contribute. By bringing GitLab Duo capabilities to every persona who uses GitLab, everyone can benefit from AI-powered workflows and organizations can ship secure software faster.\n\n## The impact of GitLab Duo on workflow efficiency\nOur goal is to help you achieve a 10x improvement in workflow efficiency by tapping into all of the [DevSecOps Platform’s AI capabilities](https://about.gitlab.com/blog/ai-ml-in-devsecops-series/). 
With GitLab 16, business leaders get an enterprise-grade solution that delivers greater efficiency by reducing tool sprawl and gives teams greater visibility into their workflows.\n\nTo learn more about the exciting features and capabilities of GitLab Duo, [watch the replay of our GitLab 16 event](https://about.gitlab.com/sixteen/).\n\nStay tuned for more updates, and get ready to experience a new era of AI-powered DevSecOps workflows with [GitLab Duo](https://about.gitlab.com/gitlab-duo/).\n",[1181,717,9,716],{"slug":2553,"featured":6,"template":693},"meet-gitlab-duo-the-suite-of-ai-capabilities","content:en-us:blog:meet-gitlab-duo-the-suite-of-ai-capabilities.yml","Meet Gitlab Duo The Suite Of Ai Capabilities","en-us/blog/meet-gitlab-duo-the-suite-of-ai-capabilities.yml","en-us/blog/meet-gitlab-duo-the-suite-of-ai-capabilities",{"_path":2559,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2560,"content":2565,"config":2570,"_id":2572,"_type":14,"title":2573,"_source":16,"_file":2574,"_stem":2575,"_extension":19},"/en-us/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development",{"title":2561,"description":2562,"ogTitle":2561,"ogDescription":2562,"noIndex":6,"ogImage":847,"ogUrl":2563,"ogSiteName":706,"ogType":707,"canonicalUrls":2563,"schema":2564},"Meet GitLab Duo Workflow - the future of AI-driven development","Workflow, our autonomous AI agent, will transform the way teams build and ship software – our first bold step towards AI-driven DevSecOps.","https://about.gitlab.com/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet GitLab Duo Workflow - the future of AI-driven development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2024-06-27\",\n      
}",{"title":2561,"description":2562,"authors":2566,"heroImage":847,"date":2567,"body":2568,"category":10,"tags":2569},[1549],"2024-06-27","Imagine if software wrote itself. It seems like a distant future, but with ongoing advancements in large language models and GitLab’s unified AI-powered DevSecOps platform, that future is quickly coming into focus. During our [GitLab 17 launch event](https://about.gitlab.com/seventeen/), we announced GitLab Duo Workflow, an autonomous AI agent that will transform how teams build, secure, deploy, and monitor software.\n\nGitLab Duo Workflow moves beyond the current landscape of reactive, prompt-based AI assistants by creating an autonomous team member actively contributing to optimize every aspect of your software development lifecycle. Workflow distinguishes itself by leveraging GitLab’s unified data store, which seamlessly connects all relevant data, projects, repositories, and documentation. This allows Workflow to be an intelligent, always-on agent that constantly monitors your projects, anticipates potential production issues, automatically identifies and resolves vulnerabilities, optimizes your applications for peak performance, and streamlines onboarding by rapidly building customized remote development environments.\n\nAI is transforming how secure software is created, maintained, updated, deployed, and monitored, enabling organizations to ship more software than ever before. GitLab Duo Workflow represents our first bold step towards AI-driven DevSecOps. We aim to empower developers to focus on high-level problem-solving, innovation, and value creation, while [GitLab Duo](https://about.gitlab.com/gitlab-duo/) handles repetitive tasks and optimization behind the scenes.\n\n## The vision for GitLab Duo Workflow\nWith GitLab Duo Workflow, we are laser-focused on tackling several key use cases to automate and optimize the software development process from end to end.\n### 1. 
Development automated\n\nStraight from the IDE, GitLab Duo Workflow helps plan and prioritize tasks tailored to individual projects and defined organizational processes. Using the requirements from a particular work item (whether an epic, issue, or task), Workflow produces an implementation plan that developers can review and refine. Then, Workflow works through the plan, generating or rewriting code to accomplish and meet the defined requirements. Workflow accomplishes this by operating within a [GitLab Remote Development workspace](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/), allowing it to safely and securely evaluate, write, and test code changes. This also ensures that Workflow produces code that both meets the requirements as well as passes all CI pipeline tests, including security scans. If the pipeline fails, Workflow automatically addresses issues and troubleshoots as needed, ensuring only high-quality code that meets your organization's standards is created and committed to your projects.\n\nOnce ready, Workflow will automatically create a merge request outlining its code changes and go through your merge request approval processes, including engaging with the code reviewer or maintainer. You can even ask Workflow to review your code and have it comment on your merge requests just as a human code reviewer would today. Even better, Workflow will implement those suggestions for you if you'd like it to. And this is only the start.\n\n### 2. Intelligent continuous improvement\nGitLab Duo Workflow will analyze your codebase in real time, suggesting architectural optimizations for enhanced efficiency, performance, and cost savings. Furthermore, it will proactively identify opportunities for code refactoring to improve scalability and address technical debt by suggesting changes to developers or automatically implementing them in a sandbox environment. 
Additionally, Workflow will dynamically manage cloud resources to prevent overprovisioning and ensure your applications always meet their performance targets.\n\n### 3. Proactive security and compliance\nSecurity and compliance are top priorities for any organization. GitLab Duo Workflow will automatically ask developers to apply patches, refactor insecure code, and adapt to emerging threats in real time. Moreover, Workflow will continuously assess security risks associated with your applications and production environments and assist you with implementing mitigating controls.\n\n### 4. Self-optimizing performance\nGitLab Duo Workflow will incorporate sophisticated feedback loops for continuous learning and improvement. By analyzing data from monitoring tools, user interactions, and business outcomes, it will continuously refine its view of your codebase to ensure that your application architectures are always aligned with your business needs. As we see with all AI, Workflow will constantly improve, catching and fixing its own mistakes as it learns to be a partner in your organization.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://player.vimeo.com/video/967982166?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allowfullscreen=\"true\" title=\"GitLab Duo Workflow the future of AI-driven DevSecOps\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## The future of AI is now\nGitLab Duo Workflow represents an exciting leap forward, transitioning from AI that requires constant human prompting to AI that drives development workflows and processes, seeking human guidance when needed. With GitLab's unified, AI-driven interface spanning the DevSecOps lifecycle, organizations will be able to create a new generation of AI-powered applications with unparalleled speed, efficiency, and innovation, all while maintaining the highest standards of security and compliance. 
No tradeoffs.\n\nStay tuned for more updates and insights as we continue to push the boundaries of what's possible with AI in software development. Together, let's embrace the future of AI-driven DevSecOps and unlock the full potential of your teams and organizations.\n\n> If you are curious about AI-driven DevSecOps and want to be part of this journey, including access to the pre-release program, please [sign up for our GitLab Duo Workflow waitlist](https://forms.gle/5ppRuNVb8LwSPNVJA).",[9,496,716,1181,759],{"slug":2571,"featured":91,"template":693},"meet-gitlab-duo-workflow-the-future-of-ai-driven-development","content:en-us:blog:meet-gitlab-duo-workflow-the-future-of-ai-driven-development.yml","Meet Gitlab Duo Workflow The Future Of Ai Driven Development","en-us/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development.yml","en-us/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development",{"_path":2577,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2578,"content":2583,"config":2587,"_id":2589,"_type":14,"title":2590,"_source":16,"_file":2591,"_stem":2592,"_extension":19},"/en-us/blog/merge-request-changes-summary-ai",{"title":2579,"description":2580,"ogTitle":2579,"ogDescription":2580,"noIndex":6,"ogImage":868,"ogUrl":2581,"ogSiteName":706,"ogType":707,"canonicalUrls":2581,"schema":2582},"ML experiment: Summarize merge request changes","Learn how GitLab is experimenting with ML-powered merge request changes summarization in this sixth installment of our ongoing AI/ML in DevSecOps series.","https://about.gitlab.com/blog/merge-request-changes-summary-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Summarize merge request changes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kai Armstrong\"}],\n        \"datePublished\": \"2023-04-20\",\n      
}",{"title":2579,"description":2580,"authors":2584,"heroImage":868,"date":2106,"body":2585,"category":10,"tags":2586},[1493],"\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nMerge requests are the central point of collaboration for code changes in GitLab. They often contain a variety of changes across many files and services within a project. Often, merge requests communicate the intent of the change as it relates to an issue being resolved, but they might not describe what was changed to achieve that. As review cycles progress, the current state of the merge request can become out of sync with the realities of the proposed changes and keeping people informed. We believe that we can leverage AI and large language models (LLMs) to help provide relevant summaries of a merge request and its proposed changes, so reviewers and authors can spend more time discussing changes and less time keeping descriptions updated.\n\nIn a rapid prototype, [Kerri Miller](https://gitlab.com/kerrizor), Staff Backend Engineer for our [Code Review Group](https://handbook.gitlab.com/handbook/product/categories/#code-review-group), used AI to summarize the merge request changes directly within the [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/). She developed a `/summarize_diff` quick action to post a summary of changes into a comment:\n\n![Merge request summary via AI](https://about.gitlab.com/images/blogimages/merge-request-changes-summary-ai.gif){: .shadow}\n\n## Iterating on AI/ML features\nWhile just an experiment today, we are iterating on how to effectively bring features like this to our customers. 
We're starting with providing complete summaries of what changes a merge request makes, and are beginning to look at more targeted flows to enhance the review cycle experience. Current areas we're investigating include providing:\n\n- Summaries of what's changed between each review cycle in a merge request.\n- Summaries of review feedback to merge request authors.\n\nThis experiment is just the start of the ways we're looking to infuse GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI-assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[917,738,717,9],{"slug":2588,"featured":6,"template":693},"merge-request-changes-summary-ai","content:en-us:blog:merge-request-changes-summary-ai.yml","Merge Request Changes Summary Ai","en-us/blog/merge-request-changes-summary-ai.yml","en-us/blog/merge-request-changes-summary-ai",{"_path":2594,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2595,"content":2600,"config":2605,"_id":2607,"_type":14,"title":2608,"_source":16,"_file":2609,"_stem":2610,"_extension":19},"/en-us/blog/merge-request-suggest-a-test",{"title":2596,"description":2597,"ogTitle":2596,"ogDescription":2597,"noIndex":6,"ogImage":1394,"ogUrl":2598,"ogSiteName":706,"ogType":707,"canonicalUrls":2598,"schema":2599},"ML experiment: Generate tests for code changes","Learn how GitLab is experimenting with ML-powered test suggestions in this latest installment of our ongoing 'AI/ML in DevSecOps' series.","https://about.gitlab.com/blog/merge-request-suggest-a-test","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Generate tests for code changes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kai Armstrong\"}],\n        \"datePublished\": \"2023-04-27\",\n      }",{"title":2596,"description":2597,"authors":2601,"heroImage":1394,"date":2602,"body":2603,"category":10,"tags":2604},[1493],"2023-04-27","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). 
Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nProposing changes and new features via merge requests is great, but what about the tests? Sometimes, tests can be the hardest part of any code change you make. Maybe you're not sure how to start writing the tests? Maybe the test doesn't cover all the scenarios that need to be tested? Maybe you just want to get a second opinion on the tests that were written? We believe that we can use generative AI and large language models (LLMs) to help provide relevant test coverage for the proposed changes, so reviewers and authors can have confidence in the quality of code changes being submitted.\n\nIn a rapid prototype, [Phil Hughes](https://gitlab.com/iamphill), Staff Frontend Engineer for our [Code Review Group](https://handbook.gitlab.com/handbook/product/categories/#code-review-group), used AI to generate suggested test coverage for changes directly in the [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/). He added a new option on merge request files to provide suggested tests in a sidebar:\n\n![Merge request test generation AI](https://about.gitlab.com/images/blogimages/merge-request-generate-tests-ai.gif){: .shadow}\n\n## Iterating on AI/ML features\n\nWhile just an experiment today, we are iterating on how to effectively bring features like this to our customers. We're beginning by generating these test suggestions, and seeking ways to incorporate them into the review flow. We're exploring ideas like:\n\n- Automatic detection of missing tests, with suggestions to add coverage\n- Automated review of the proposed tests in the merge request, for appropriateness and completeness\n\nThis experiment is just the start of the ways we're infusing GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. 
We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI-assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our \"[AI/ML in DevSecOps](https://about.gitlab.com/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[716,738,717,9],{"slug":2606,"featured":6,"template":693},"merge-request-suggest-a-test","content:en-us:blog:merge-request-suggest-a-test.yml","Merge Request Suggest A Test","en-us/blog/merge-request-suggest-a-test.yml","en-us/blog/merge-request-suggest-a-test",{"_path":2612,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2613,"content":2618,"config":2623,"_id":2625,"_type":14,"title":2626,"_source":16,"_file":2627,"_stem":2628,"_extension":19},"/en-us/blog/ml-experiment-sql",{"title":2614,"description":2615,"ogTitle":2614,"ogDescription":2615,"noIndex":6,"ogImage":868,"ogUrl":2616,"ogSiteName":706,"ogType":707,"canonicalUrls":2616,"schema":2617},"ML experiment: Writing SQL is about to get a lot easier","Learn how GitLab is experimenting with ML-powered product features in this third installment of our ongoing AI/ML in DevSecOps series.","https://about.gitlab.com/blog/ml-experiment-sql","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n   
     \"headline\": \"ML experiment: Writing SQL is about to get a lot easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2023-03-30\",\n      }",{"title":2614,"description":2615,"authors":2619,"heroImage":868,"date":2620,"body":2621,"category":10,"tags":2622},[1455],"2023-03-30","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nSQL, the structured query language, has long been the backbone of data analysis and manipulation. But let's face it, not everyone is an SQL wizard. For many, writing even a simple SQL query can be a daunting task, let alone tackling more advanced queries. Even experienced data analysts spend lots of time and effort writing and debugging complex queries just to answer simple business intelligence questions.\n\nWith the recent advancements in AI and natural language processing, it's now possible for AI models to generate SQL code from simple English language queries. This means that even people without a deep understanding of SQL can generate complex queries to analyze their data. This technology not only improves accessibility but can also save valuable time and effort for data analysts.\n\n## AI-assisted SQL generation\nAt GitLab, we’re experimenting with AI-assisted SQL generation in our [Product Analytics group](https://docs.gitlab.com/ee/user/product_analytics/). This area is focused on helping users understand and gain insights from usage patterns. 
GitLab Product Analytics can track events within your project applications, which enables you to explore your data and generate dashboards with interactive graphs and charts. You can use our visual designer or YAML to define them, and we envision it becoming even easier with AI assistance. You can learn more about our Product Analytics plans in our [sneak peek blog post](/blog/introducing-product-analytics-in-gitlab/).\n\nIn a simple experiment, our own [Tim Zallmann](https://gitlab.com/timzallmann), Senior Director of Engineering, prototyped leveraging AI-generated queries from simple natural language parsing. The results quickly showcase how powerful using natural language can be to help generate the SQL to populate the Product Analytics dashboards. \n\n![Animated gif image of SQL generation](https://about.gitlab.com/images/blogimages/sql-query-generation-lg.gif){: .shadow}\n\nAbove, you can see an example of how we're using natural language to generate SQL queries to power dashboard charts and graphs. You can watch the full demo in the video below. \n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/q3HZy0P0ugw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Iterating on AI/ML features\nWhile just an experiment today, we are iterating on how to effectively bring features like this to our customers. This experiment is just the start of many ways we’re looking to infuse GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI Assisted features. We’ll be sharing more of these demos in this blog series.\n\nInterested in using these AI-generated features? 
[Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[917,738,717,9],{"slug":2624,"featured":6,"template":693},"ml-experiment-sql","content:en-us:blog:ml-experiment-sql.yml","Ml Experiment Sql","en-us/blog/ml-experiment-sql.yml","en-us/blog/ml-experiment-sql",{"_path":2630,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2631,"content":2636,"config":2641,"_id":2643,"_type":14,"title":2644,"_source":16,"_file":2645,"_stem":2646,"_extension":19},"/en-us/blog/modern-software-development-problems-require-modern-ai-powered-devsecops",{"title":2632,"description":2633,"ogTitle":2632,"ogDescription":2633,"noIndex":6,"ogImage":868,"ogUrl":2634,"ogSiteName":706,"ogType":707,"canonicalUrls":2634,"schema":2635},"Modern software development problems require modern AI-powered DevSecOps","Learn how applying AI can help solve common development challenges.","https://about.gitlab.com/blog/modern-software-development-problems-require-modern-ai-powered-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Modern software development problems require modern AI-powered DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2023-09-07\",\n      
}",{"title":2632,"description":2633,"authors":2637,"heroImage":868,"date":2638,"body":2639,"category":10,"tags":2640},[2473],"2023-09-07","\nThe landscape of software development is already being impacted by the integration of AI tools. GitLab is making that impact a positive one. Infusing AI throughout the software development lifecycle lowers the barrier for everyone to contribute. Let's take a look how organizations can evolve the idea _\"every company is a software company\"_ into _\"every company should be an AI-first company\"_ and how this transformation can happen by embracing AI-powered DevSecOps.\n\nAn effective way to demonstrate how AI can be a game-changer for software development teams is to walk through a detailed scenario. In this example, you'll see how applying [GitLab Duo](https://about.gitlab.com/gitlab-duo/), the suite of AI capabilities powering workflows, to a dilemma that many organizations face – how to automate complex customer-facing tools – can improve the experience for DevSecOps teams and customers alike.\n\n## Challenge: Keeping customer Q&A systems from going stale\nImagine a large financial services firm has an extensive FAQ section on its website but keeping that information up to date and easy to navigate has become increasingly difficult. Also, the FAQs don't cover queries that are less frequent but should be easy to respond to.\n\n![FAQ](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/faq-improved.gif)\n\nTo resolve this, the company proposed building an AI-enabled Q&A system that not only answers common queries but also other finance-related questions. The process to build the AI-enabled system described above can look like this:\n\n![process](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/process-demo.png)\n\n### Problem definition and collaboration\nThe journey begins with a GitLab issue. 
This is where the team collaborates, discusses ideas, and brainstorms to define the problem and scope of work. Collaborating on solving the Q&A system problem results in a lengthy issue thread where it can be challenging to quickly understand the status of the work. Anyone new to the issue or even current participants may need to spend considerable time reading comments to get an overview of the problem status and the decisions that have been made. AI-driven text summarization simplifies understanding of the comments, allowing developers and business teams to align on the problem and requirements.\n\n![AI-driven text summary](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/summary-hd.png)\n\nText summarization applied to issue comments \n{: .note.text-center}\n\nGitLab provides an AI-generated summary right in the issue where all the collaboration took place, thus helping development teams to get an overview of the main points and takeaways. The AI-generated summary enables developers and other team members to quickly extract and review key points from the issue without having to read through long conversations.\n\n### Solution development\nThe proposed solution involves leveraging a large language model (LLM) to build a Q&A system. The AI-generated summary allows developers to quickly break down the requirements and store them in GitLab using [Requirements management](https://docs.gitlab.com/ee/user/project/requirements/).  
The requirements are stored as easy-to-understand items with their own criteria of success and expected behavior as depicted in the figure below:\n\n![requirements](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/requirements.png)\n\nAI Q&A system requirements in GitLab \n{: .note.text-center}\n\nHaving clear requirements is key to starting implementation of a successful solution, creating a [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/), and iterating on the solution to the problem.\n\nWhile working towards a solution, GitLab Duo Code Suggestions can help developers focus. Leveraging generative AI, Code Suggestions provides relevant suggestions reducing time spent on repetitive or common code. This frees up developers to focus their efforts on complex problems that require deeper understanding of the codebase and business logic. \n\n![code-suggestions](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/code-suggestion.png)\n\nGenerated code speeds up development\n{: .note.text-center}\n\nDevelopers get code suggestions inline in their integrated development environment, or IDE, as they type or after prompting the model with code comments. The suggestions are predicted based on the code written before the cursor and the file extension of the script.\n\n### Efficient code review\nLet's consider that, until this point, developers working on the AI-enabled Q&A system have committed code contributions to build the solution. And it is also evident that infusing AI into the software development lifecycle has allowed the team to move faster. To move faster without compromising quality, however, it’s important to carry out code reviews on all the contributions done so far by the development team. Code reviews help developers share knowledge and maintain high-quality software. 
Companies like the one in this scenario might have a large number of developers, and identifying suitable reviewers can be challenging and time consuming. To resolve this, the company can leverage [GitLab Duo Suggested Reviewers](https://about.gitlab.com/gitlab-duo/). This feature employs machine learning to suggest the most relevant code reviewers, streamlining and speeding up the review process. \n\n![suggested-reviewers](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/suggested-review-sa.png)\n\nSuggested reviewers appear in the reviewer dropdown\n{: .note.text-center}\n\nThe machine learning algorithm that powers Suggested Reviewers learns from the project’s contribution graph data; the suggestions it generates are contextual to the changes made in the code.   \n\n### Code review summarization and test generation\nAI-driven text summarization helps in understanding suggested code changes derived from the code review at a glance. In our scenario, once the appropriate reviewer has been chosen, this person can go through the code and submit comments, propose changes, or identify potential edge cases that may have been overlooked. Getting a quick summary of all the suggested code changes before diving into the details can potentially speed up software delivery.\n\n![mr-summary](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/MR-Summary.png)\n\nCode Review summary\n{: .note.text-center}\n\nAs part of the review, it is important to make sure that all the functions have been tested. 
If there was the case of an overlooked test, once again it is possible to leverage generative AI within GitLab to create [test cases](https://docs.gitlab.com/ee/ci/test_cases/).\n\n![suggest-test-cases](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/suggest-test-cases.png)\n\nTest cases generated by AI\n{: .note.text-center}\n\nIn the figure below, we see the test code generated by AI. This provides helpful assertions that can also serve as an inspiration for discovering edge cases that might have not been considered before.\n\n![test cases](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/test-cases-created.png)\n\nSuggested test cases\n{: .note.text-center}\n\nThis solidifies the quality of the project codebase, closing the loop of efficient code reviews.\n\n### Minimizing context switching\nAllowing developers to obtain guidance and answers to GitLab-related questions without leaving the platform can be achieved by interacting with GitLab Duo Chat functionality.This AI-powered feature reduces context switching and enables developers or other team members to ask open questions and request information using natural language. Let's assume that a developer wasn't exactly sure how to leverage AI to generate test cases like the ones requested during the code review. Using GitLab Duo Chat, the developer can ask how to achieve exactly that without having to leave the platform, as depicted in the figure below:\n\n![GitLab Duo Chat](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/chat.png)\n\nGitLab Duo Chat\n{: .note.text-center}\n\nInteracting with the platform using natural language provides a convenient way to achieve results and get things done while minimizing context switching, improving the software delivery experience and speed. 
Now, speaking of speed, let's dive into the realm of continuous integration/continuous delivery and deployment (CI/CD).\n\n### GitLab CI/CD \nThe AI-infused DevSecOps approach described so far aligns and enhances CI/CD methodologies. The financial services company in this scenario uses these continuous methodologies to deliver software faster since these are proven ways to automatically build, test, secure, and deploy software.\n\n![chatbot-pipeline](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/pipelines.png)\n\nGitLab CI/CD pipeline\n{: .note.text-center}\n\nThe figure above depicts the CI/CD pipeline that is triggered and used in the development of our LLM Financial Q&A system. The jobs it executes are:\n\n- Build: creates the container with its respective code and dependencies\n- Test: unit testing and security scanners, which help ensure that no code vulnerabilities are being introduced \n- Deploy: puts the solution in the hands of end users\n\nOnce the solution is deployed, end users can ask questions to it, and the FAQ problem can be considered solved.\n\n![chatbot](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/chatbot.gif)\n\nThe AI touchpoints explored in this scenario and that led to the faster development of our AI-powered Q&A system are seamlessly integrated within DevSecOps methodologies, enhancing the development and operations experience.\n\n![devsecops-ml](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/devsecops-embedded.png)\n\nUltimately, this increases the likelihood of a successful and secure deployment of the AI-powered Q&A system.\n\n### Recap\nGoing back to the process overview, in the following table we see how the different AI touchpoints are used throughout the software development lifecycle.\n\n|  | What AI-powered DevSecOps can do for you | Feature |\n| ------ | ------ | ------ 
|\n|![brainstorming](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/brainstorm.png) | AI generates a summary of the discussions and provide the most relevant information for your team to move forward in the solution development   |[Summarize issues](https://docs.gitlab.com/ee/user/ai_features.html#summarize-issue-discussions)       |\n|![requirements](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/solution-requirements.png)       | The team agrees on a chatbot and break down the application requirements and set criteria it must fulfill       |  [Requirements management](https://docs.gitlab.com/ee/user/project/requirements/)       |\n|![develop](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/develop.png)        | Developers leverage Code Suggestions to speed up code and generate tests       |   [Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html)     |\n|![reviewer](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/suggested-reviewer.png)        | Quickly find the most suitable person to review code changes and move on in the delivery process       |  [Suggested Reviewers](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/data_usage.html#:~:text=Suggested%20Reviewers%20is%20the%20first,and%20GitLab%20user%20account%20metadata.)      
|\n|![code-review](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/review.png)        |  Summarize Merge Request, which includes code summary, helps contributors understand the most important changes that need to be addressed      | [Summarize Merge Request](https://docs.gitlab.com/ee/user/ai_features.html#summarize-merge-request-changes)      |\n|![pipeline](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/buildtest.png)        | CI/CD and integrated security       |   [CI/CD testing](https://docs.gitlab.com/ee/ci/testing/)    |\n|![deploy](https://about.gitlab.com/images/blogimages/2023-07-24-modern-problems-modern-devsecops/deploy.png)        | Kubernetes agent, and multi-cloud deployments      |  [Kubernetes integration](https://docs.gitlab.com/ee/user/clusters/agent/)      |\n\nWatch the video walkthrough of the financial services scenario:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/LifJdU3Qagw?si=fpfvuhJQ_jYmYrT2\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nAs organizations embark on their AI journey, more and better software will have to be created. Drawing insights from DevSecOps combined with AI-powered workflows provides a solid foundation to enjoy the benefits of AI, unlock new use cases, and provide robust solutions that learn from data. 
Ultimately, AI enables organizations to enhance software development practices and to tackle modern problems with modern solutions.\n\nGet started with GitLab Duo today with this [free trial of GitLab Ultimate](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=localhost%2Fsolutions%2Fai%2F).\n",[716,496,9],{"slug":2642,"featured":6,"template":693},"modern-software-development-problems-require-modern-ai-powered-devsecops","content:en-us:blog:modern-software-development-problems-require-modern-ai-powered-devsecops.yml","Modern Software Development Problems Require Modern Ai Powered Devsecops","en-us/blog/modern-software-development-problems-require-modern-ai-powered-devsecops.yml","en-us/blog/modern-software-development-problems-require-modern-ai-powered-devsecops",{"_path":2648,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2649,"content":2655,"config":2660,"_id":2662,"_type":14,"title":2663,"_source":16,"_file":2664,"_stem":2665,"_extension":19},"/en-us/blog/modernizing-a-simple-c-application-to-java-with-gitlab-duo",{"title":2650,"description":2651,"ogTitle":2650,"ogDescription":2651,"noIndex":6,"ogImage":2652,"ogUrl":2653,"ogSiteName":706,"ogType":707,"canonicalUrls":2653,"schema":2654},"Modernizing a simple C++ application to Java with GitLab Duo","Learn how to refactor code from memory unsafe languages to memory safe languages with the help of GitLab's AI capabilities, saving time and effort on application modernization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659507/Blog/Hero%20Images/AdobeStock_623844718.jpg","https://about.gitlab.com/blog/modernizing-a-simple-c-application-to-java-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Modernizing a simple C++ application to Java with GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": 
\"2024-12-11\",\n      }",{"title":2650,"description":2651,"authors":2656,"heroImage":2652,"date":2657,"body":2658,"category":10,"tags":2659},[733],"2024-12-11","Memory unsafe languages are those that do not handle any memory management\non behalf of the developer. For example, when programming in C or C++, if\nyou need memory during runtime, you will need to allocate and deallocate the\nmemory yourself, running the risk of ending up with memory leaks in cases\nwhen you inadvertently forget to deallocate it. Other languages like Ada and\nFORTRAN provide some memory management but may not prevent memory leaks.\nMany organizations, including those in the public sector, have applications\nthat have been developed using languages that are memory unsafe and are\noften looking to modernize these to a memory safe language, such as Java,\nPython, JavaScript, or Golang.\n\n\nThis tutorial focuses on a specific example of modernizing a simple C++\napplication to Java by refactoring it with the help of [GitLab\nDuo](https://about.gitlab.com/gitlab-duo/), our suite of AI capabilities,\nand shows how much time and effort you can save in the migration.\n\n\n## Understanding the simple C++ application\n\n\nLet’s make the assumption that we have been tasked with the migration of a\nC++ application to a memory safe language, namely Java. The C++ application\ncan be found in the following project (thank you to\n[@sugaroverflow](https://gitlab.com/sugaroverflow) for contributing this\nsample application):\n\n\n[https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/refactor-to-java/air-quality-application](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/refactor-to-java/air-quality-application)\n\n\nSince this is the first time we are seeing this application, let’s invoke\nGitLab Duo Code explanation to better understand what it does. We open file\n`main.cpp` in Visual Studio Code and select the entirety of this file. 
We\nthen right-click and select **GitLab Duo Chat > Explain selected snippet**\nfrom the popup menu.\n\n\n![duo-code-explanation-menu-option](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675546/Blog/Content%20Images/code-explanation-menu-option.png)\n\n\nThe GitLab Duo Chat window opens up and the slash command `/explain` is\nexecuted for the selected code. Chat returns a very thorough and detailed\ndescription and explanation in natural language form of what each function\ndoes in the file as well as examples on how to run the compiled program.\n\n\n![code-explanation-text](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/code-explanation-text.png)\n\n\nIn short, the simple C++ application takes a U.S. zip code as input and\nreturns the air quality index for that zip code.\n\n\n## Compiling and running the C++ application\n\n\nTo further understand this simple C++ application, we proceed to compile and\nrun it. We could have asked Chat how to do this, however, the project has a\nREADME file that provides the commands to compile the project, so we go\nahead and use those by entering them in the Terminal window of VS Code.\n\n\n![compile-command](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/compile-command.png)\n\n\nAfter the compilation finishes, we change directory to the `build`\nsubdirectory in the project, which is where the compilation process places\nthe executable file for this application. 
Then, we run the executable by\nentering the following command:\n\n\n`./air_quality_app 32836`\n\n\nAnd we see the response as follows:\n\n\n`Air Quality Index (AQI) for Zip Code 32836: 2 (Fair)`\n\n\n![cplus-plus-app-execution-output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/cplus-plus-app-execution-output.png)\n\n\nThis confirms to us that the application was successfully compiled and it’s\nexecuting appropriately.\n\n\n## Refactoring the application to Java\n\n\nLet’s start migrating this C++ application to Java. We take advantage of\nGitLab Duo Chat and its refactoring capabilities by using the slack command\n`/refactor`. We qualify the slash command with specific instructions on what\nto do for the refactoring. We enter the following command in the Chat input\nfield:\n\n\n> /refactor this entire application to Java. Provide its associated pom.xml\nto build and run the Java application. Also, provide the directory structure\nshowing where all the resulting files should reside for the Java\napplication.\n\n\n![refactor-chat-output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/refactor-chat-output.png)\n\n\nChat returns a set of Java files that basically refactor the entire C++\napplication to the memory safe language. 
In addition and per the prompt,\nChat returns the pom.xml file, needed by\n[maven](https://docs.gitlab.com/ee/api/packages/maven.html) for the building\nand execution of the refactored application as well as its directory\nstructure, indicating where each generated file should reside.\n\n\nWe copy and save all the generated files to our local directory.\n\n\n## Creating the Java project\n\n\nIn VS Code, we now proceed to open an empty project in which we will set up\nthe directory structure of the new Java application and its contents.\n\n\nWe create all the previously generated Java files in their corresponding\ndirectories in the new project and paste their contents in each.\n\n\n![java-files-created](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/java-files-created.png)\n\n\nLastly, we save all the files to our local disk.\n\n\n## Asking for help to build and run the Java application\n\n\nAt this point, we have an entire Java application that has been refactored\nfrom C++. Now, we need to build it but we don’t quite remember what maven\ncommand we need to use to accomplish this.\n\n\nSo we ask GitLab Duo Chat about this. 
We enter the following prompt in the\nChat input field:\n\n\n> How do you build and run this application using maven?\n\n\n![maven-info-output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/maven-info-output.png)\n\n\nChat returns with a thorough explanation on how to do this, including\nexamples of the maven command to build and run the newly created Java\napplication.\n\n\n## Building and running the Java application\n\n\nGitLab Duo Chat understands the application and environment context and\nresponds that we first need to create an environment variable called\n`API_KEY` before we can run the application.\n\n\nIt also provides the maven command to execute to build the application,\nwhich we enter in the Terminal window:\n\n\n```unset\n\nmvn clean package\n\n``` \n\n\n![java-build-output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/java-build-output.png)\n\n\nOnce the build finishes successfully, we copy the generated command to run\nthe application from the Chat window and paste it in the Terminal window:\n\n\n```unset\n\njava -jar target/air-quality-checker-1.0-SNAPSHOT-jar-with-dependencies.jar\n90210\n\n```\n\n\n![java-app-execution-output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/java-app-execution-output.png)\n\n\nThe application successfully executes and returns the string:\n\n\n```unset\n\nAir Quality Index (AQI) for Zip Code 90210: 2 (Fair)\n\n```\n\n\nWe have confirmed that the modernized version of the application, now\nrefactored in Java, runs just like its original C++ version.\n\n\n## Watch this tutorial in action\n\n\nWe have seen that by leveraging the power of GitLab Duo in your\nmodernization activities, you can save a great deal of time and effort,\nfreeing you to spend more time innovating and creating value to your\norganization.\n\n\nHere is a video to show you, in action, the tutorial you just 
read:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/LJ7GOr_P0xs?si=_ZjF75DAXEQnY2Mn\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n> #### Want to get started with GitLab Duo? [Start a free trial\ntoday!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/)\n\n\n## Learn more\n\n- [Refactor code into modern languages with AI-powered GitLab\nDuo](https://about.gitlab.com/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo/)\n\n- [Secure by Design principles meet DevSecOps innovation in GitLab\n17](https://about.gitlab.com/blog/secure-by-design-principles-meet-devsecops-innovation-in-gitlab-17/)\n\n- [How to secure memory-safe vs. manually managed\nlanguages](https://about.gitlab.com/blog/memory-safe-vs-unsafe/)\n",[9,496,737,186],{"slug":2661,"featured":6,"template":693},"modernizing-a-simple-c-application-to-java-with-gitlab-duo","content:en-us:blog:modernizing-a-simple-c-application-to-java-with-gitlab-duo.yml","Modernizing A Simple C Application To Java With Gitlab Duo","en-us/blog/modernizing-a-simple-c-application-to-java-with-gitlab-duo.yml","en-us/blog/modernizing-a-simple-c-application-to-java-with-gitlab-duo",{"_path":2667,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2668,"content":2673,"config":2679,"_id":2681,"_type":14,"title":2682,"_source":16,"_file":2683,"_stem":2684,"_extension":19},"/en-us/blog/navigating-the-ai-frontier-lessons-from-the-cutting-edge",{"title":2669,"description":2670,"ogTitle":2669,"ogDescription":2670,"noIndex":6,"ogImage":1913,"ogUrl":2671,"ogSiteName":706,"ogType":707,"canonicalUrls":2671,"schema":2672},"Navigating the AI frontier: Lessons from the cutting edge","Discover key insights on AI development, from rapid prototyping to production, evaluation frameworks, and emerging industry 
trends.","https://about.gitlab.com/blog/navigating-the-ai-frontier-lessons-from-the-cutting-edge","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Navigating the AI frontier: Lessons from the cutting edge\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michelle Gill\"},{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-09-10\"\n      }",{"title":2669,"description":2670,"authors":2674,"heroImage":1913,"date":2676,"body":2677,"category":10,"tags":2678},[2675,1220],"Michelle Gill","2024-09-10","As AI continues to evolve at a breakneck pace, developers and organizations are grappling with how to effectively integrate it into their workflows and products. At GitLab, we're always looking to stay at the forefront of these developments to better serve our community. Recently, our team attended the [AI Engineer World's Fair](https://www.ai.engineer/worldsfair), which provided valuable insights into the current state of [AI development](https://about.gitlab.com/gitlab-duo/). Here's what we learned.\n\n## Insights on AI development\n\n### The traditional development lifecycle is gone\n\nThe advent of AI has dramatically altered the traditional software development lifecycle. While AI prototypes can be created in minutes, advancing them to production takes significantly longer than before. This shift requires a rethink of our development approach, including:\n\n- streamlining processes where possible\n- adding new steps to accommodate AI-specific requirements\n- embracing rapid iteration and rewriting over multiple cycles\n\n### Speed is key\n\nIn the fast-paced world of AI, speed of iteration is crucial. 
Some key takeaways:\n\n- If productization takes more than three months, the product may be outdated by the time it's ready.\n- Aim to try more things faster than competitors.\n- Reduce iteration time and accelerate collaboration.\n- Implement systems that de-risk getting things wrong and allow for fearless changes.\n\n### AI development requires new, practical methods\n\nAI demands new ways of approaching development, including the following:\n\n1. Prioritize user experience early\n- Frontload user testing and prepare for evaluations early in the design phase.\n- Validate product needs with the best available model.\n- Consider trade-offs between achievability and value when selecting use cases.\n\n2. Take an iterative approach\n\n- Identify your base model (choose the best available).\n- Start your prompt template (involve domain experts and product managers).\n- Identify your data selection strategy.\n- Iterate on components separately and evaluate each step.\n\n3. Develop prompt engineering best practices\n\n- Standardize around a single query language.\n- Ask the large language model (LLM) to write code to solve problems rather than solving them directly.\n- Automate what you can to reduce degrees of freedom.\n- Use code for deterministic tasks outside the AI chain.\n- Break problems into smaller, more manageable pieces.\n- Leverage technical writers and domain experts for prompt crafting.\n\n### Evaluations need more time\n\nThe \"Great Eval Problem\" has flipped the traditional development timeline, with evaluations now taking a majority of the time. With LLMs, we have replaced the need for sophisticated model development approaches with an API call to a third party. However, we still require a significant amount of time to evaluate the responses. 
To address this:\n\n- Incorporate evaluations at every level using multiple techniques.\n- Evaluate different aspects at different stages (local, pre-production, production).\n- Focus on end-to-end evaluations that measure end-user value.\n- Consider user-centric evaluations and let features \"die with UX\" if necessary.\n\n### The customer PoV must be top of mind\n\nCustomer-centric considerations should be your focus. Here's how:\n\n- Maintain developer \"flow\" and reduce context switching.\n- Provide transparency into AI inputs and outputs.\n- Ease users into natural language interactions.\n- Consider human-in-the-loop approaches for complex tasks.\n\n### AI engineer roles are changing, pay attention\n\nAs the industry matures, the role of AI engineers is becoming more defined:\n- Requires production experience and product development competency.\n- Requires engaging with various personas (ML engineers, software engineers, domain experts).\n- Demands strong data intuition and the ability to extract meaning from data.\n\n## Looking ahead\n\nThe AI landscape continues to evolve rapidly. Some trends to watch:\n\n- unification of prompts across models\n- advancements in evaluation and prompt generation tools\n- the rise of \"slop\" (unrequested and unreviewed AI-generated content)\n- movement towards inline code completion and autonomous agents\n- improvements in fine-tuning, RAG workflows, and managed agents\n\nAs we navigate this exciting and rapidly changing field, it's crucial to stay informed, adapt quickly, and always keep the end user in mind. At GitLab, we're committed to incorporating these insights into our development processes and sharing our learnings with the community.\n\nWe encourage our developers and the wider community to explore these concepts further and contribute to the ongoing dialogue around AI development best practices. 
Together, we can shape the future of AI-driven software development.\n\n> ## Learn more about AI and DevSecOps\n> - Visit our [GitLab Duo site](https://about.gitlab.com/gitlab-duo/)\n> - Read our [\"Developing GitLab Duo\"](https://about.gitlab.com/blog/developing-gitlab-duo-series/) behind-the-scenes series\n> - Bookmark our [AI/ML blog page](https://about.gitlab.com/blog/categories/ai-ml/)\n",[9],{"slug":2680,"featured":6,"template":693},"navigating-the-ai-frontier-lessons-from-the-cutting-edge","content:en-us:blog:navigating-the-ai-frontier-lessons-from-the-cutting-edge.yml","Navigating The Ai Frontier Lessons From The Cutting Edge","en-us/blog/navigating-the-ai-frontier-lessons-from-the-cutting-edge.yml","en-us/blog/navigating-the-ai-frontier-lessons-from-the-cutting-edge",{"_path":2686,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2687,"content":2692,"config":2698,"_id":2700,"_type":14,"title":2701,"_source":16,"_file":2702,"_stem":2703,"_extension":19},"/en-us/blog/new-report-on-ai-assisted-tools-points-to-rising-stakes-for-devsecops",{"title":2688,"description":2689,"ogTitle":2688,"ogDescription":2689,"noIndex":6,"ogImage":2176,"ogUrl":2690,"ogSiteName":706,"ogType":707,"canonicalUrls":2690,"schema":2691},"New report on AI-assisted tools points to rising stakes for DevSecOps","Read the key findings from the \"Omdia Market Radar: AI-Assisted Software Development, 2023-24\" report, including the state of AI-based code assistants.","https://about.gitlab.com/blog/new-report-on-ai-assisted-tools-points-to-rising-stakes-for-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New report on AI-assisted tools points to rising stakes for DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rusty Weston, Guest Contributor\"}],\n        \"datePublished\": \"2024-02-14\"\n      
}",{"title":2688,"description":2689,"authors":2693,"heroImage":2176,"date":2695,"body":2696,"category":10,"tags":2697},[2694],"Rusty Weston, Guest Contributor","2024-02-14","Small wonder that the buzz about deploying generative AI and large language models (LLMs) for code completion and code generation has focused almost exclusively on developer productivity. It's a significant milestone — but it’s not the entire story. Less widely understood is what AI-assisted tools can do for development teams and, more broadly, for organizational competitiveness. Combining AI-powered tools and integrated development environments (IDEs) doesn’t just pump up developer efficiency, it transforms the entire software development lifecycle (SDLC) while adding “layers” of safety enhancements. \n\nDevSecOps teams see firsthand that AI-assisted software tools help reduce software testing bottlenecks and improve security as they streamline workflows. In this new era, DevSecOps can simultaneously shorten the software development cycle, enforce security standards, and enhance output. In short, the right tools make organizations more competitive. \n\nJust as LLM quality improvements amplify the value of generative AI, the new class of AI-powered development tools must offer privacy- and transparency- controls to harness these models effectively. Utilizing rigorous controls, DevSecOps gains efficiencies and improves team collaboration while reducing AI adoption's security and compliance risks.\n\n## An analyst take on what matters  \nOne of the key findings of a new report called “[Omdia Market Radar: AI-Assisted Software Development, 2023–24](https://learn.gitlab.com/devsecops-plat-ai/analyst-omdia-ai)” is that “the use of AI-based code assistants has reached a level of proficiency such that enterprises not using this technology will be at a disadvantage.”\n\n> [Read the Omdia Market Radar report](https://learn.gitlab.com/devsecops-plat-ai/analyst-omdia-ai). 
\n\nFew may have anticipated the development community’s swift integration of AI-powered application development. Until recently, it’s been a gradual build. According to Omdia, “The application of AI to code assistance has been ongoing for the last decade with a focus on assisting professional developers.” After years of development, the report emphasizes that “this technology is now a permanent part of the landscape.”\n\nOmdia’s finding also tracks with the [GitLab 2023 Global DevSecOps Report: The State of AI in Software Development](https://about.gitlab.com/developer-survey/), which featured input from 1,000 global leaders in development, IT operations, and security. Today, nearly one-in-four DevSecOps teams have adopted AI tools, and another two-thirds plan to use AI in software development. In the GitLab report, more than half (55%) of teams heralded the promise of improved efficiency. At the same time, two in five respondents expressed concerns about whether AI-generated code may introduce security vulnerabilities.\n\n## Advocating a layered approach \n\nGiven potential risks such as LLM inaccuracy,  including widely documented [hallucinations](https://www.fastcompany.com/91006321/how-ai-companies-are-trying-to-solve-the-llm-hallucination-problem), Omdia cautions brands that “careless use of LLM output could harm and tarnish” their reputation. “To increase the accuracy of this technology and ensure that developers can use this technology safely and without violating license rules in the data used to train the models, there is a need to add layers on top of the foundation model.”  \n\nBy layers, Omdia emphasizes the value of “safety and enhancement” safeguards and filters. 
These layers create a “major differentiator” for AI-assisted development tools because they manage “training data licensing rules, the quality and accuracy of the generated output, and the prevention of insecure code.” The report's authors caution that “generated outputs need to be carefully evaluated” to ensure they are “safe and of high quality.”\n\nIn effect, the safeguards and filters in AI-assisted software development establish a “defense-in-depth” strategy for coding. That’s a concept in which “[attacks missed by one technology are caught by another](https://csrc.nist.gov/glossary/term/defense_in_depth),” which can also apply to any elevated digital risk, such as reputational harm. \n\n## A new perspective on GitLab Duo\n\nOmdia highlighted [GitLab Duo](https://about.gitlab.com/gitlab-duo/), the company’s suite of AI capabilities, as one of the products it considers “suitable for enterprise-grade application development,” noting that its “AI assistance is integrated throughout the SDLC pipeline.” \n\nAmong the report highlights:\n- “GitLab places an emphasis on respecting user privacy and being transparent in how it operates. In its selection of AI technology, it is agnostic to the models adopted and will use what it considers the best model for each use case.”  \n- “When GitLab looked at where developers were spending their time, it was only 25% on coding, and 75% was taken up by other necessary tasks: planning, onboarding, testing, documentation, and security. 
Therefore, GitLab applies AI to all these tasks, not just code generation assistance.”\u2028\n- “To ensure privacy, GitLab does not let its AI retain user data in any way and does not use client code to train its models.”\n- GitLab’s AI gateway is model agnostic, and “GitLab uses models from Google and [Anthropic](https://about.gitlab.com/blog/gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation/) to power GitLab Duo.”\u2028\n- Beyond code suggestions, developers “can ask Duo Explanation using natural language to explain what the code does.”\n\n## GitLab Duo introduces stronger controls\n\nFor DevSecOps teams, there’s no tradeoff between efficiency and security. Both are essential. GitLab Duo includes vital features such as Duo Code Suggestions and Chat, which enable AI-powered code completion, code generation, and chat, improving collaboration between developers, security, and operations teams. \n\nWith GitLab Duo, customer privacy is never subjected to tradeoffs. All customer code stays secret — it’s never applied to model training or fine-tuning. These practices are core to GitLab’s privacy- and transparency-first approach to team collaboration and security and reduce AI adoption's compliance risks. \n\nThe Omdia report notes that “software developers face greater complexity and hurdles today in producing code.” As a result, “There is a need to build in application security, including enforcing standards and triaging security vulnerabilities.” The report finds that GitLab has “security guardrails consistently applied throughout.” \n\nAdopters need tools that can help them tap AI’s benefits without introducing vulnerabilities or undermining compliance standards in ways that jeopardize trust with customers, partners, employees, and other critical stakeholders. 
DevSecOps teams seek tools to reduce the time, stress, and complexity of the entire application lifecycle.\n\n> [Read the Omdia Market Radar report](https://learn.gitlab.com/devsecops-plat-ai/analyst-omdia-ai).\n\n_Rusty Weston is an award-winning data-driven storyteller, editor, researcher, and writer. He formerly served as Editor of InformationWeek.com, Managing Editor at Yahoo!, and Vice President and Managing Editor for the Ogilvy content team._ \n",[9,716,1181],{"slug":2699,"featured":91,"template":693},"new-report-on-ai-assisted-tools-points-to-rising-stakes-for-devsecops","content:en-us:blog:new-report-on-ai-assisted-tools-points-to-rising-stakes-for-devsecops.yml","New Report On Ai Assisted Tools Points To Rising Stakes For Devsecops","en-us/blog/new-report-on-ai-assisted-tools-points-to-rising-stakes-for-devsecops.yml","en-us/blog/new-report-on-ai-assisted-tools-points-to-rising-stakes-for-devsecops",{"_path":2705,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2706,"content":2712,"config":2719,"_id":2721,"_type":14,"title":2722,"_source":16,"_file":2723,"_stem":2724,"_extension":19},"/en-us/blog/pipelines-as-code",{"title":2707,"description":2708,"ogTitle":2707,"ogDescription":2708,"noIndex":6,"ogImage":2709,"ogUrl":2710,"ogSiteName":706,"ogType":707,"canonicalUrls":2710,"schema":2711},"Pipelines-as-Code: How to improve speed from idea to production","Pipelines-as-Code streamline automatic building, testing, and deploying of applications using prebuilt pipelines and infrastructure components. 
Here's how it works.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/pipelines-as-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Pipelines-as-Code: How to improve speed from idea to production\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robert Williams\"}],\n        \"datePublished\": \"2022-01-18\"\n      }",{"title":2707,"description":2708,"authors":2713,"heroImage":2709,"date":2715,"body":2716,"category":1078,"tags":2717},[2714],"Robert Williams","2022-01-18","\nToday’s DevOps platform-centric world is moving steadily towards an \"Everything-as-Code\" mentality. Add in cloud native, and it's clearly even more important to standardize how you define your DevOps processes.\n\n## Why ‘as-Code’?\n\nThanks to faster iteration, cloud native computing, and [microservices-based architectures](https://about.gitlab.com/topics/microservices/), as-Code technologies have become the de-facto standard for a lot of different parts of the software development lifecycle. \n\nThe need to release faster requires a single spot for teams to collaborate on any kind of change – code, infrastructure, configuration, networking, or testing. And to implement that change quickly we need to be able to see and review it before it goes into production. \n\nAs-Code solutions are at the core of cloud native technologies such as Kubernetes, where you utilize YAML or JSON formats to configure and manage. Here are the key advantages of 'as-Code':\n\n- auditability\n- scalability\n- efficiency\n- collaboration\n\nThese benefits come into play with every piece of technology that moves into as-Code; we have seen it time and again as DevOps processes mature and we automate each piece of the software development lifecycle. 
Here are the critical 'as-Code' stages: \n\n### Build-as-Code\n\nOne of the first steps when building a new pipeline is to implement a way to build your application automatically. Containerization is one of the most common ways: You define your build steps as a Dockerfile and then you have automated the build of the application.\n\n### Test-as-Code\n\nAs our deployment frequency and team size scales, the need for test cases to be automated scales as well. So we automate, we write unit tests and test scripts to execute unit tests, and then we ensure the changes can be continuously integrated safely, without introducing unplanned bugs.\n\n### Security-as-Code\n\nTo ensure software gets to market quickly, security must be included in your testing process. The testing has to happen either through tools integrated with each individual project, or implemented as code, creating job templates for security scanners that can be ingested by projects as required. These steps enable teams to quickly become compliant with various security frameworks (like PCI-DSS, HIPAA, or ISO) as they become relevant for the project.\n\n### Deployment-as-Code\n\nDeployments need to be standardized so they are predictable every time. To ensure successful peer review, production and development environment deployments need to be the same, and there's an added bonus of a quality gate between them. Through scripting and implementation of Deployment-as-Code, we end up with the ability to continuously deploy code and continuously deliver value.\n\n## Why Pipelines-as-Code?\n\nPipelines are the center of the CI/CD workflow – they're the automation heart that powers all of the benefits of as-Code technologies. Once you have the Build-as-Code, Test-as-Code, Deployment-as-Code, Infrastructure-as-Code, and Configuration-as-Code, you have all the parts needed to ensure that you can reliably and predictably take your application into production environments. 
But, to move changes in with agility, you need to take all those parts and string them together into a pipeline.\n\nThe technology behind Pipelines-as-Code makes it possible to create centralized repositories for your organization's pipelines. Pipelines-as-Code can be set up to fit all boxes for varied languages and use cases (like [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/)) or with a [number of options](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates) so that developers can pick base pipelines to fit their use case. It's important to have a baseline that conforms to the organization's standards because that always increases the speed to production.\n\nThe entire team can collaborate on changes to each part of the workflow. Version history can be easily maintained in the same version control system as everything else that touches the DevOps lifecycle.\n\nThe benefits of as-Code technology reach a pinnacle with Pipelines-as-Code, so teams gain increases in efficiency, scalability, auditability, and collaboration. Pipelines-as-Code are at the center of automated GitOps, DevOps, and SecOps workflows.\n",[9,2718,917],"cloud native",{"slug":2720,"featured":6,"template":693},"pipelines-as-code","content:en-us:blog:pipelines-as-code.yml","Pipelines As Code","en-us/blog/pipelines-as-code.yml","en-us/blog/pipelines-as-code",{"_path":2726,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2727,"content":2733,"config":2738,"_id":2740,"_type":14,"title":2741,"_source":16,"_file":2742,"_stem":2743,"_extension":19},"/en-us/blog/quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai",{"title":2728,"description":2729,"ogTitle":2728,"ogDescription":2729,"noIndex":6,"ogImage":2730,"ogUrl":2731,"ogSiteName":706,"ogType":707,"canonicalUrls":2731,"schema":2732},"Quick vulnerability remediation with GitLab Advanced SAST + Duo AI ","Shorten your mean time to remediation by pairing Advanced SAST and artificial intelligence. 
This detailed demo shows you how.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098458/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_24mPf16vAPHORs3d9y62q_1750098458538.png","https://about.gitlab.com/blog/quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quick vulnerability remediation with GitLab Advanced SAST + Duo AI \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2024-10-22\"\n      }",{"title":2728,"description":2729,"authors":2734,"heroImage":2730,"date":2735,"body":2736,"category":10,"tags":2737},[1436],"2024-10-22","With GitLab 17.4, we’ve made [GitLab Advanced SAST generally\navailable](https://about.gitlab.com/blog/gitlab-advanced-sast-is-now-generally-available/).\n[GitLab Advanced\nSAST](https://docs.gitlab.com/ee/user/application_security/sast/gitlab_advanced_sast.html)\nis a static application security testing scanner designed to discover\nvulnerabilities by performing cross-function and cross-file taint analysis.\nBy following the paths user inputs take, the analyzer identifies potential\npoints where untrusted data can influence the execution of your application\nin unsafe ways, ensuring the vulnerabilities are detected even when they\nspan multiple functions and files.\n\n\nGitLab Advanced SAST can be used together with [GitLab Duo Vulnerability\nExplanation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#explaining-a-vulnerability)\nin order to reduce the mean time to remediation (MTTR). 
GitLab Duo can\nprovide practical, AI-powered examples of how threat actors can exploit\nvulnerabilities and offer light-weight remediation guidance, which can be\nused with cross-file analysis to enhance application security (AppSec)\nefficiency.\n\n\nThis tutorial will show you how to:\n\n* enable GitLab Advanced SAST\n\n* read results from the scanner\n\n* review the code flow of a vulnerability\n\n* use GitLab AI to quickly remediate the vulnerability\n\n\n## Enable GitLab Advanced SAST\n\n\nFollow the instructions below to enable GitLab Advanced SAST. You can also\nview this video to get started:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/xDa1MHOcyn8?si=5SYuKgP-BdBryqcU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## Run GitLab Advanced SAST on each code commit\n\n\nBefore using Advanced SAST, the following prerequisites must be met:\n\n\n- GitLab Ultimate Subscription ([free\ntrial](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com%2F))\n\n- GitLab SaaS or GitLab Self-managed (running Version 17.4)\n\n\nTo enable the GitLab Advanced SAST scanner:\n\n\n- On the left sidebar, select **Search** or **Go to** and find your project.\n\n- Add or edit the `.gitlab-ci.yml` to include the following:\n    - Test stage\n    - `Jobs/SAST.gitlab-ci.yml` template\n    - `GITLAB_ADVANCED_SAST_ENABLED` variable set to true\n- Apply the change.\n\n\nYour newly merged `.gitlab-ci.yml` should contain the following:\n\n\n```yaml\n\nstages:\n  - test\n\ninclude:\n  - template: Jobs/SAST.gitlab-ci.yml\n\nvariables:\n  GITLAB_ADVANCED_SAST_ENABLED: 'true'\n```\n\n\nThis will now run the `gitlab-advanced-sast` job within the test stage of\nyour application along with all the other jobs you have defined. 
Advanced\nSAST will replace the semgrep SAST scanner for the [supported programming\nlanguages](https://docs.gitlab.com/ee/user/application_security/sast/gitlab_advanced_sast.html#supported-languages).\n\n\n![Running `gitlab-advanced-sast` job within the test stage of your\napplication](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098466/Blog/Content%20Images/Blog/Content%20Images/1_aHR0cHM6_1750098466629.png)\n\n\n\u003Ccenter>\u003Ci>GitLab Advanced SAST job in pipeline\u003C/i>\u003C/center>\n\n\n\u003Cbr>\u003C/br>\n\n\n**Note:** You can fully configure the job as you would any job in GitLab.\nFor more information, see the [CI/CD YAML syntax\ndocumentation](https://docs.gitlab.com/ee/ci/yaml/).\n\n\n## Remediate vulnerabilities in merge request (pre-production)\n\n\nJust like our previous SAST scanner, Advanced SAST allows you to scan source\ncode in the diff of a feature branch. This allows us to address any incoming\nvulnerabilities before they make it into production. 
Here we can see the\nscanner results for the diff within a merge request:\n\n\n![Advanced SAST scanner results for the diff within a merge\nrequest](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/2_aHR0cHM6_1750098466630.png)\n\n\nWhen selecting a newly detected vulnerability, we get the following details\nto assist with remediation:\n\n\n- **Status:** The status of the vulnerability (Needs triage, Confirmed,\nDismissed, Resolved)\n\n- **Description:** Detailed information on the detected vulnerability\n\n- **Detection time:** Time vulnerability was detected\n\n- **Location:** Line of code where vulnerability is detected\n\n- **Severity:** Severity of vulnerability from CVE database\n\n- **Training:** Gamified training from our partners\n\n- **Solutions:** Information on how to remediate or resolve a vulnerability\n\n- **Identifiers:** Relevant links showcasing detailed description,\nexploitation, and remediation\n\n\n![Merge request with vulnerability\ninsights](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/MR_with_vulnerability_insights_aHR0cHM6_1750098466632.png)\n\n\n\u003Ccenter>\u003Ci>Merge request with vulnerability insights\u003C/i>\u003C/center>\n\n\n\u003Cbr>\u003C/br> \n\nVulnerabilities detected within an MR are actionable, meaning they can be\ndismissed or an issue can be created and populated with relevant\nvulnerability information.\n\n\nDismissing an issue saves AppSec teams time, because they can see relevant\ndeveloper information when reviewing an MR. Creating a confidential issue\nallows developers and AppSec teams to further collaborate on resolving a\nvulnerability where a fix is not straightforward. 
Confidential issues have\nlimited permissions and can be used with confidential merge requests to\nprevent possible malicious actors from exploiting.\n\n\nTo further support separation of duties and prevent vulnerable code from\nmaking it into production, you can require approval from certain people (for\nexample, the security team) in order to merge vulnerable code.\n\n\n![GitLab security policies in\naction](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/security_policies_in_action_aHR0cHM6_1750098466634.png)\n\n\n\u003Ccenter>\u003Ci>Security policies in action\u003C/i>\u003C/center>\n\n\n\u003Cbr>\u003C/br>\n\n\n**Note:** Learn more about Security Policies and how to implement them in\nthe [Security Policy\ndocumentation](https://docs.gitlab.com/ee/user/application_security/policies/).\n\n\n## Manage vulnerabilities in production\n\n\nWhile preventing vulnerabilities from making it into production is crucial\nfor application security, it is equally as important to manage\nvulnerabilities in production. 
When security scanners are run on a default\nor production-level branch, a [vulnerability\nreport](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/)\nwill be populated with the latest vulnerability data which can be used to\ntriage and manage vulnerabilities.\n\n\n![GitLab Vulnerability Report sorted by Advanced\nSAST](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/5_aHR0cHM6_1750098466636.png)\n\n\n\u003Ccenter>\u003Ci>GitLab Vulnerability Report sorted by Advanced SAST\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br>\n\n\nWhen selecting a vulnerability you get similar vulnerability details as seen\nin a merge request, making for a single source of truth for developers and\nAppSec teams.\n\n\n![Vulnerability page with vulnerability\ninsights](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/Vuln_page_with_vulnerability_insights_aHR0cHM6_1750098466637.png)\n\n\n\u003Ccenter>\u003Ci>Vulnerability page with vulnerability insights\u003C/i>\u003C/center>\n\n\n\u003Cbr>\u003C/br>\n\n\nAppSec teams can triage a vulnerability by changing its status and adding\nrelevant details on the status change. Issues can be created to track the\nprogress of a fix. 
From here, a developer can be assigned.\n\n\n## Examine vulnerable code flow\n\n\nFor vulnerabilities detected with Advanced SAST, we can see a \"Code flow\"\ntab on the Vulnerability page.\n\n\n![Advanced SAST - image\n7](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/7_aHR0cHM6_1750098466638.png)\n\n\n\u003Ccenter>\u003Ci>GitLab Advanced SAST code flow\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br>\n\n\nIn this example, you can see that a vulnerability is traced across multiple\nfunctions, giving deeper insight into the best practices we should put in\nplace to not only resolve the vulnerability, but prevent similar\nvulnerabilities in the future.\n\n\n## Use GitLab Duo Vulnerability Explanation\n\n\nGitLab Duo can help you mitigate or remediate a vulnerability by using a\nlarge language model to:\n\n\n- Summarize the vulnerability\n\n- Help developers and security analysts understand the vulnerability\n\n- Show how the vulnerability can be exploited\n\n- Provide a suggested remediation or mitigation\n\n\nTo use Vulnerability Explanation, the following is required:\n\n\n- GitLab Ultimate subscription\n\n- GitLab Duo Enterprise seat\n\n- GitLab Duo must be enabled for your group or instance\n\n\nFrom the vulnerability report, you can select a SAST vulnerability and go to\nits Vulnerability page. 
From the Vulnerability page, you can do any of the\nfollowing to explain the vulnerability:\n\n\n- Select the text below the vulnerability description\n\n- You can use AI by asking GitLab Duo Chat to explain this vulnerability and\noffer a suggested fix.\n\n- In the upper right, from the \"Resolve with merge request\" dropdown list,\nselect **Explain Vulnerability**, then select **Explain vulnerability**.\n\n- Open GitLab Duo Chat and use the explain a vulnerability command:\n`/vulnerability_explain`.\n\n\nThen the vulnerable code will be processed by Anthropic’s Claude 3 Haiku\nmodel and provide the following data:\n\n\n![GitLab Duo Vulnerability\nExplanation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/vuln_explain_2_aHR0cHM6_1750098466640.png)\n\n\n## Putting it all together\n\n\nNow, let's put it all together with a concrete example. I will use the\n[OWASP Juice Shop](https://owasp.org/www-project-juice-shop/) as my demo\napplication and run GitLab Advanced SAST to detect a vulnerability in\nproduction. Then I will use the vulnerability code flow and GitLab Duo to\ninvestigate vulnerability exploitation, and remediation. 
You can [follow\nalong with this\ndemo](https://gitlab.com/gitlab-da/tutorials/security-and-governance/owasp/juice-shop)\nand see this workflow in action by watching:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/H1S43oM44k0?si=2LYorTjByOHbCAko\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\nThe detection and remediation workflow is as follows:\n\n\n- Enable GitLab Advanced SAST and run it on the project’s default branch.\n\n- Open the Vulnerability Report and sort by **Tool:GitLab Advanced SAST**.\n\n- Select the **Improper neutralization of special elements in data query\nlogic** vulnerability found in `Basket.ts`.\n\n- Use the vulnerability code flow to understand the vulnerable paths.\n\n- Run **Explain this vulnerability** to see exploit information.\n\n- Run the application locally to attempt exploitation.\n\n- Change vulnerability status to \"Confirmed\" and provide relevant info.\n\n- Determine remediation path using all relevant data:\n    - Vulnerability page insights, Code Flow, Vulnerability Explanation results\n- Create a new branch and apply remediation.\n\n- Run the remediated application locally and try to exploit again.\n\n- Create a merge request with the fix.\n\n- Code change will be tested using CI to assure we don’t break the\napplication.\n\n- Validate and merge MR.\n\n- Test exploit in deployed environment.\n\n- Change vulnerability status to \"Resolved\" on the Vulnerability page.\n\n\n**Note:** There are many ways to triage and remediate vulnerabilities, make\nsure to follow best practices set by your organization.\n\n\n# Useful links\n\n\nTo learn more about GitLab and how you can get started with enhancing your\norganization’s application security posture, check out the following\nresources.\n\n\n* [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/) \n\n* [GitLab 
Duo](https://about.gitlab.com/gitlab-duo/)  \n\n* [GitLab Security and Compliance\nSolutions](https://about.gitlab.com/solutions/security-compliance/)  \n\n* [GitLab Software Supply Chain Security\nSolutions](https://about.gitlab.com/solutions/supply-chain/)  \n\n* [GitLab Continuous Software\nCompliance](https://about.gitlab.com/solutions/continuous-software-compliance/)  \n\n* [JuiceShop Demo\nApplication](https://gitlab.com/gitlab-da/tutorials/security-and-governance/owasp/juice-shop)  \n\n* [GitLab AppSec\ndocumentation](https://docs.gitlab.com/ee/user/application_security/)  \n\n* [Advanced SAST \ndocumentation](https://docs.gitlab.com/ee/user/application_security/sast/gitlab_advanced_sast.html)  \n\n* [Explain this Vulnerability\ndocumentation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#explaining-a-vulnerability)  \n\n* [Code Flow\ndocumentation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#vulnerability-code-flow)  \n\n* [Security Policy\ndocumentation](https://docs.gitlab.com/ee/user/application_security/policies/) \n\n* [OWASP Juice Shop\ndocumentation](https://owasp.org/www-project-juice-shop/)\n",[9,691,737,717,496],{"slug":2739,"featured":91,"template":693},"quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai","content:en-us:blog:quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai.yml","Quick Vulnerability Remediation With Gitlab Advanced Sast Duo 
Ai","en-us/blog/quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai.yml","en-us/blog/quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai",{"_path":2745,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2746,"content":2752,"config":2756,"_id":2758,"_type":14,"title":2759,"_source":16,"_file":2760,"_stem":2761,"_extension":19},"/en-us/blog/quickly-resolve-broken-ci-cd-pipelines-with-ai",{"title":2747,"description":2748,"ogTitle":2747,"ogDescription":2748,"noIndex":6,"ogImage":2749,"ogUrl":2750,"ogSiteName":706,"ogType":707,"canonicalUrls":2750,"schema":2751},"Quickly resolve broken CI/CD pipelines with AI","When your CI/CD pipeline fails, it leads to delays, decreased productivity, and stress. AI-powered Root Cause Analysis makes problem-solving faster and smarter.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097355/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2811%29_78Dav6FR9EGjhebHWuBVan_1750097355230.png","https://about.gitlab.com/blog/quickly-resolve-broken-ci-cd-pipelines-with-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quickly resolve broken CI/CD pipelines with AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2024-12-03\",\n      }",{"title":2747,"description":2748,"authors":2753,"heroImage":2749,"date":1951,"body":2754,"category":10,"tags":2755},[774],"CI/CD pipelines are the backbone of efficiency in software development. They help teams test, build, and deploy code quickly. 
But when these pipelines break, everything slows down — deadlines get missed, and developers are left frustrated as they work to fix things and keep projects on track.\n\n![CI/CD pipeline with multiple failed jobs](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097362/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097362772.png)\n\n\u003Ccenter>\u003Ci>CI/CD pipeline with multiple failed jobs\u003C/i>\u003C/center>\u003Cbr>\u003C/br>\n\n**So, why do pipelines break in the first place?** Let’s break it down.\n\n## Reasons for pipeline failures\n\nA pipeline failure occurs when the automated workflow in your [CI/CD pipeline](https://about.gitlab.com/topics/ci-cd/cicd-pipeline/) — a series of steps that can include building, testing, and deploying code — does not execute as expected and ends with an error message. This failure can prevent code from being properly built, tested, or deployed, causing delays in software delivery and requiring troubleshooting to resolve. \n\nPipeline failures can happen for a variety of reasons. 
Some common causes include:\n- Syntax errors: A small mistake in the code, like a missing semicolon or incorrect variable name, can cause the pipeline to fail.\n- Failed tests: Unit or integration tests might fail due to broken code, incorrect configurations, or mismatched dependencies.\n- Misconfigurations: Incorrect pipeline settings or environment configurations can lead to failed builds or deployments.\n\nThere are also more complex issues that add to the challenge:\n- Infrastructure-as-Code ([IaC](https://about.gitlab.com/topics/gitops/infrastructure-as-code/)) issues: Problems in provisioning cloud infrastructure, such as errors in Terraform scripts or CloudFormation templates, can prevent a successful deployment.\n- Kubernetes and GitOps challenges: Misconfigurations in [Kubernetes clusters](https://about.gitlab.com/blog/kubernetes-the-container-orchestration-solution/) or issues with [GitOps](https://about.gitlab.com/topics/gitops/) workflows (e.g., syncing Kubernetes states with Git repositories) can cause pipeline failures that are difficult to diagnose.\n- Long, messy stack traces: When an error occurs deep in the system, stack traces can become long and hard to decipher, especially when they span multiple components or services.\n\nThese challenges make troubleshooting more difficult and time-consuming, as finding the root cause often involves sifting through complex logs, reviewing configuration files, and testing different solutions.\n\n## The real impact of failed pipelines\n\nWhen a pipeline fails, it doesn’t just delay your deployment — it brings stress and frustration. Developers are forced to pause their work and dive into troubleshooting, which often leads to a chain reaction of disruptions. This makes it harder to meet deadlines and increases the pressure on the entire team. But why is manual troubleshooting so stressful?\n\n### Manual troubleshooting \n\nThe time it takes to fix a broken pipeline varies. 
It depends on things like:\n- How well the developer knows the project\n- How experienced they are with similar issues\n- Their overall problem-solving skills\n\nManually digging through logs to figure out what went wrong is a tough and tedious process. Logs can come from all over the place, including application errors and system messages, and they’re often messy and hard to interpret. And on top of that, fixing the pipeline usually requires a lot of jumping back and forth between tasks, adding more time to the process.\n\nThis is where [GitLab Duo](https://about.gitlab.com/gitlab-duo/) comes in. GitLab Duo can sift through all that messy data and spot issues much faster, simplifying the process so you don’t need to be an expert to figure out what went wrong. With AI, fixing your pipelines becomes faster, easier, and much less stressful.\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176104/Blog/zxvvu7p9vc3qpmwl32ya.png\" alt=\"broken pipeline\">\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176108/Blog/bpx6dqilfhltzboyp8k8.png\" alt=\"fix suggestions for broken pipelines\">\n\n## GitLab Duo Root Cause Analysis with generative AI\n\nWhen your CI/CD pipeline breaks, you don’t have to spend hours manually troubleshooting. Enter [GitLab Duo’s Root Cause Analysis (RCA)](https://docs.gitlab.com/ee/user/gitlab_duo/#root-cause-analysis). This AI-powered tool quickly identifies the exact cause of the failure and suggests fixes — right within the DevSecOps platform. No matter how long or complicated your stack traces are, RCA analyzes all the data, breaks it down, and gives you clear, actionable insights.\n\n**It tells you exactly what caused the error, provides steps to fix it, and even pinpoints the specific files and lines of code that need attention.** And, to make it even easier, it suggests code fixes to get everything back on track. 
This makes troubleshooting a lot faster and more straightforward.\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176111/Blog/nmagby9hoksskogve53m.png\" alt=\"root cause of failure\">\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176115/Blog/dndis1cedwbmbnj33q3v.png\" alt=\"example fix\">\n\n## Keep the conversation going with follow-up questions\n\nWith GitLab Duo RCA, you don’t just get answers — you can ask follow-up questions to dig deeper. Want to explore alternative solutions? No problem. You can add [more context](https://docs.gitlab.com/ee/user/gitlab_duo_chat/index.html#the-context-chat-is-aware-of) by referencing other files, issues, or epics in your repo. For example, you could open your `.gitlab-ci.yml` file in the IDE and ask the chat, “Based on this file, and the analyzed CI/CD pipeline, how would you propose to optimize the pipeline?” \n\n## Privacy first – everything stays in GitLab\nOne of the key benefits of GitLab Duo RCA is that it works right out of the box within GitLab. You won’t have to switch tools or go hunting for external help. Plus, your [logs and sensitive data stay secure](https://about.gitlab.com/privacy/) - there’s no need to send them off to external AI solutions. RCA is seamlessly integrated within GitLab, offering valuable insights without ever compromising privacy.\n\n![broken pipelines - image 6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097363/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097362773.png)\n\n## Get started today\n\nWant to see how AI can supercharge your development process, making it smoother and faster? Dive into our GitLab Duo Enterprise product tour below and discover how GitLab Duo’s AI-powered insights can transform every stage of your development journey — from planning and coding to troubleshooting and deployment. 
Click the image below to start the tour!\n\n[![GitLab Duo Enterprise tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097363/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-12-02_at_12.41.10_PM_aHR0cHM6_1750097362774.png)](https://gitlab.navattic.com/duo-enterprise)\n\n> [Start a free trial of GitLab Duo today!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/)",[9,496,737,717],{"slug":2757,"featured":6,"template":693},"quickly-resolve-broken-ci-cd-pipelines-with-ai","content:en-us:blog:quickly-resolve-broken-ci-cd-pipelines-with-ai.yml","Quickly Resolve Broken Ci Cd Pipelines With Ai","en-us/blog/quickly-resolve-broken-ci-cd-pipelines-with-ai.yml","en-us/blog/quickly-resolve-broken-ci-cd-pipelines-with-ai",{"_path":2763,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2764,"content":2770,"config":2774,"_id":2776,"_type":14,"title":2777,"_source":16,"_file":2778,"_stem":2779,"_extension":19},"/en-us/blog/rail-m-is-an-imperfectly-good-start-for-ai-model-licenses",{"title":2765,"description":2766,"ogTitle":2765,"ogDescription":2766,"noIndex":6,"ogImage":2767,"ogUrl":2768,"ogSiteName":706,"ogType":707,"canonicalUrls":2768,"schema":2769},"RAIL-M is an imperfectly good start for AI model licenses","\"GitLab, Inc. is dedicated to open source and AI. 
This is our take on a model license relevant to open source and AI communities: the BigScience Open RAIL-M license.\"","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671836/Blog/Hero%20Images/railmimage.jpg","https://about.gitlab.com/blog/rail-m-is-an-imperfectly-good-start-for-ai-model-licenses","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"RAIL-M is an imperfectly good start for AI model licenses\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robin Schulman\"}],\n        \"datePublished\": \"2023-07-25\",\n      }",{"title":2765,"description":2766,"authors":2771,"heroImage":2767,"date":2030,"body":2772,"category":10,"tags":2773},[2340],"GitLab, Inc. is dedicated to open source – we believe in it, use it, and give back to it, and we have an [open core](https://about.gitlab.com/company/stewardship/) business model. We also care deeply about artificial intelligence (AI) – we recently announced that we are investing heavily in AI by [infusing it into every phase of our comprehensive DevSecOps platform](https://about.gitlab.com/solutions/ai/).\n\nWe were thus very interested to see [Responsible AI Licenses'](https://www.licenses.ai/) recent release of a model license relevant to both the open source and AI communities: the BigScience Open RAIL-M license ([RAIL-M](https://www.licenses.ai/blog/2022/8/26/bigscience-open-rail-m-license)).\n\nWe see RAIL-M as an exciting but flawed development in the AI model licensing space. 
Its authors’ intentions are admirable and important, but in practical terms, RAIL-M still has room for improvement.\n\n### What is RAIL-M?\nRAIL-M is part of the Open Responsible AI Licenses (Open RAIL) [family](https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses), which is a collection of AI licenses that aim to promote responsible use by imposing behavioral use restrictions on the model’s licensees and downstream users. The Open RAIL family is not alone – it’s within a new wave of licenses (see, for example, the [TII Falcon LLM License](https://huggingface.co/tiiuae/falcon-40b/blob/main/LICENSE.txt)) spawned by the public’s recent interest in AI. RAIL-M specifically applies these use restrictions to the model (the “M” in RAIL-M stands for “model”).\n\nThis blog discusses RAIL-M specifically, and does not consider the other licenses in the Open RAIL family.\n\n### RAIL-M puts ethics at the forefront\nIn recent months, much ink has been spilled over the novel ethical dilemmas that AI presents. Technologists, journalists, and companies alike have sounded the alarm on the various societal harms that AI could exacerbate (see, for example, OpenAI, Google Deepmind, and other AI companies’ [recent open letter](https://www.nytimes.com/2023/05/30/technology/ai-threat-warning.html) declaring that AI poses a “risk of extinction”). Regulators are taking notice. Recently, OpenAI’s CEO Sam Altman testified in a Senate hearing on AI’s risks, and a key committee of European Parliament lawmakers [approved the EU AI Act](https://www.europarl.europa.eu/news/en/press-room/20230505IPR84904/ai-act-a-step-closer-to-the-first-rules-on-artificial-intelligence), which aims to mitigate AI’s potential harms.\n\nRAIL-M places these ethical considerations front and center. 
Its restrictions prohibit the AI model’s licensees and their downstream users from engaging in potentially harmful uses such as applying the model in a way that violates applicable law, to provide medical advice, or to harass or defraud others.\n\nThese provisions’ practical implications are, admittedly, still a bit unclear. Regardless, these use-based restrictions will, at the very least, deter some from applying the model in harmful ways, and help push ethical considerations to the forefront of today’s fast-paced AI landscape. In the words of the Organisation for Economic Co-operation and Development ([OECD](https://oecd.ai/en/catalogue/tools/bigscience-openrail-m-license)): “OpenRAILs are a vehicle towards the consolidation of an informed and respectful culture of sharing AI artifacts acknowledging their limitations and the values held by the licensors of the model.”\n\n### In practice, RAIL-M isn’t perfect\nFirst, describing RAIL-M as an “open” license – as RAIL-M’s authors have in its title – is misleading. RAIL-M’s authors conflate royalty-free access and flexible use and re-distribution with truly “open” licenses. The Open Source Initiative ([OSI](https://opensource.org/osd/)) defines “open source” as software that, among other qualities, “must not restrict anyone from making use of the program in a specific field of endeavor.” RAIL-M’s use-based restrictions – which include prohibitions on providing medical advice, and generating information to be used for the administration of justice or law enforcement – prevent it from being a truly “open” license.\n\nSecond, regulators such as those in the EU will likely pass laws imposing certain use restrictions on AI tools in the near future. 
RAIL-M doesn’t cover how its own use-based requirements will interact with AI-related laws, which may present an issue if, for example, a RAIL-M restriction conflicts with one of these new regulations.\n\nFinally, commentators, including [Kyle Mitchell](https://writing.kemitchell.com/2023/01/26/Open-RAIL-M-Unclear) and [Luis Villa](https://blog.tidelift.com/evaluating-the-rail-license-family), have also expressed concerns that some of RAIL-M’s requirements may be too vague to comply with.\n\n### A net benefit to the AI community\nRAIL-M isn’t perfect. However, setting aside its practical flaws, RAIL-M’s release is still an important signal both to and from the AI community that AI ethics matter and must be considered even (and perhaps especially) when offering free, publicly-available models. To again quote [the OECD](https://oecd.ai/en/catalogue/tools/bigscience-openrail-m-license): “[l]icenses [like those in the Open RAIL family] … should not be conceived as burdensome legal technical mechanisms, but rather as a communication instrument among AI communities bringing stakeholders together by sharing common messages on how the licensed artifact can be used.”\n\nRAIL-M, and the Open RAIL family as a whole, will likely encourage the AI community – both AI model maintainers and perhaps even proprietary model creators – to consider, and work to mitigate, their models’ potential harms and abuses. 
We’ll be interested to see where it goes.\n\n_Cover image by [Google DeepMind](https://unsplash.com/@deepmind?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/ZJKE4XVlKIA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_\n",[9,1082],{"slug":2775,"featured":6,"template":693},"rail-m-is-an-imperfectly-good-start-for-ai-model-licenses","content:en-us:blog:rail-m-is-an-imperfectly-good-start-for-ai-model-licenses.yml","Rail M Is An Imperfectly Good Start For Ai Model Licenses","en-us/blog/rail-m-is-an-imperfectly-good-start-for-ai-model-licenses.yml","en-us/blog/rail-m-is-an-imperfectly-good-start-for-ai-model-licenses",{"_path":2781,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2782,"content":2787,"config":2792,"_id":2794,"_type":14,"title":2795,"_source":16,"_file":2796,"_stem":2797,"_extension":19},"/en-us/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo",{"title":2783,"description":2784,"ogTitle":2783,"ogDescription":2784,"noIndex":6,"ogImage":949,"ogUrl":2785,"ogSiteName":706,"ogType":707,"canonicalUrls":2785,"schema":2786},"Refactor code into modern languages with AI-powered GitLab Duo ","This detailed tutorial helps developers use AI to modernize code by switching to a new programming language and gain knowledge about new features in the same language.","https://about.gitlab.com/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Refactor code into modern languages with AI-powered GitLab Duo \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2024-08-26\",\n      }",{"title":2783,"description":2784,"authors":2788,"heroImage":949,"date":2789,"body":2790,"category":10,"tags":2791},[1262],"2024-08-26","Whether you are tasked with modernizing the code 
base or framework by\nswitching to a new programming language, or you need knowledge about new\nlanguage features in the same language, AI-powered [GitLab\nDuo](https://about.gitlab.com/gitlab-duo/) can help. Learn how to approach\ncode refactoring challenges with best practices using examples from the past\n20 years of my coding career. \n\n\nThe prompts and examples in this article are shown in different IDEs: VS\nCode and JetBrains IDEs (IntelliJ IDEA, PyCharm, and CLion) with the [GitLab\nDuo\nextensions/plugins](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html)\ninstalled. The development environment uses GitLab.com, including updates to\nAnthropic Claude 3.5 as Large Language Model (LLM) for GitLab Duo [Code\nSuggestions](https://docs.gitlab.com/ee/user/gitlab_duo/#code-suggestions)\nand [Chat](https://docs.gitlab.com/ee/user/gitlab_duo/#gitlab-duo-chat).\nSpoiler: They are even more powerful and efficient.\n\n\nYou can navigate into each section of the article, or read top-down. 
The\nsource code and challenges with exercises are provided for self-learning,\ntoo.\n\n\n- [Refactor code to modern programming language\nstandards](#refactor-code-to-modern-programming-language-standards)\n    - [Generate Java 7 and refactor to Java 8](#generate-java-7-and-refactor-to-java-8)\n    - [Refactor across C++ standards](#refactor-across-c%2B%2B-standards)\n        - [Migration: Refactor C++03 into C++14](#migration-refactor-c%2B%2B03-into-c%2B%2B14)\n        - [Downgrade: Refactor C++23 to C++11](#downgrade-refactor-c%2B%2B23-to-c%2B%2B11)\n    - [Explain and refactor COBOL](#explain-and-refactor-cobol)\n- [Refactor a language into another\nlanguage](#refactor-a-language-into-another-language)\n    - [Refactor C to Rust](#refactor-c-to-rust)\n    - [Refactor Perl to Python](#refactor-perl-to-python)\n- [More refactoring hands-on](#more-refactoring-hands-on)\n    - [Refactor JavaScript](#refactor-javascript)\n    - [Refactor Bash to ZSH or SH](#refactor-bash-to-zsh-or-sh)\n    - [More use cases and tutorials](#more-use-cases-and-tutorials)\n- [Key takeaways](#key-takeaways)\n\n\n## Refactor code to modern programming language standards\n\n\nThe next sections show how to refactor, explain, and understand the language\nspecific differences in a defined programming language. \n\n\n### Generate Java 7 and refactor to Java 8 \n\n\nA customer wanted to know if GitLab Duo supports Java 7. While testing the\nCode Suggestions and Chat prompts to generate Java 7 specific code, I also\nthought of migration help. Anthropic Claude 3.5 is capable of understanding\nthe language-specific differences, and so is GitLab Duo.\n\n\nThe following example shows a prompt to generate Java 7 code with Code\nSuggestions.\n\n\n```java\n\n// Generate a new class for a File Manager. 
Use Java 7 features only.\n\nimport java.io.*;\n\nimport java.nio.file.*;\n\nimport java.util.*;\n\n\npublic class FileManager {\n    private Path rootDirectory;\n\n    public FileManager(String rootPath) throws IOException {\n        this.rootDirectory = Paths.get(rootPath);\n        if (!Files.exists(rootDirectory)) {\n            Files.createDirectories(rootDirectory);\n        }\n    }\n\n    public void createFile(String fileName, String content) throws IOException {\n        Path filePath = rootDirectory.resolve(fileName);\n        Files.write(filePath, content.getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);\n    }\n\n    public String readFile(String fileName) throws IOException {\n        Path filePath = rootDirectory.resolve(fileName);\n        return new String(Files.readAllBytes(filePath));\n    }\n\n    public void deleteFile(String fileName) throws IOException {\n        Path filePath = rootDirectory.resolve(fileName);\n        Files.delete(filePath);\n    }\n\n    public List\u003CString> listFiles() throws IOException {\n        List\u003CString> fileList = new ArrayList\u003C>();\n        try (DirectoryStream\u003CPath> stream = Files.newDirectoryStream(rootDirectory)) {\n            for (Path file : stream) {\n                fileList.add(file.getFileName().toString());\n            }\n        }\n        return fileList;\n    }\n\n    public void copyFile(String sourceFileName, String destinationFileName) throws IOException {\n        Path sourcePath = rootDirectory.resolve(sourceFileName);\n        Path destinationPath = rootDirectory.resolve(destinationFileName);\n        Files.copy(sourcePath, destinationPath, StandardCopyOption.REPLACE_EXISTING);\n    }\n\n    public void moveFile(String sourceFileName, String destinationFileName) throws IOException {\n        Path sourcePath = rootDirectory.resolve(sourceFileName);\n        Path destinationPath = rootDirectory.resolve(destinationFileName);\n        
Files.move(sourcePath, destinationPath, StandardCopyOption.REPLACE_EXISTING);\n    }\n\n    public long getFileSize(String fileName) throws IOException {\n        Path filePath = rootDirectory.resolve(fileName);\n        return Files.size(filePath);\n    }\n\n    public boolean fileExists(String fileName) {\n        Path filePath = rootDirectory.resolve(fileName);\n        return Files.exists(filePath);\n    }\n}\n\n```\n\n\nYou can continue with the following steps in your IDE:\n\n\n1. Learn and understand the generated code: Select the code in the editor\n(blue color in the screenshot), switch into GitLab Duo Chat and send the\n`/explain` slash command as prompt.\n\n\n![Validate the generated code with the `/explain` slash command in GitLab\nDuo\nChat](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/intellij_java7_generate_refactor.png)\n\n\n2. Refactor the Java 7 code into Java 8: Select the code in the editor,\nswitch into Chat, and send `/refactor using Java 8 features` as refined\nprompt.\n\n3. Practice an alternative to refactoring: Create a new file `java8.java`\nand generate Java 8 specific code using Code Suggestions with the prompt `//\nGenerate a new class for a File Manager. Use Java 8 features only.`.\n\n4. 
Continue writing Java 8 code in the same context with AI-powered code\ncompletions.\n\n\nYou can watch all steps in this recording.\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/XKRv6uBkD2I\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\nThe source code is available in the [GitLab Duo Challenge - Generate and\nrefactor Java 7 to 8\nproject](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-generate-refactor-java-7-to-8).\n\n\n### Refactor across C++ standards\n\n\nThe C++ standard has a long history and can be considered stable and mature.\nDepending on the target architecture and platforms, the supported standard\ndepends on the compiler on the host operating system.\n\n\nIn my career, I needed to support 25 different Linux and Windows\ndistributions in open source projects, and have seen varying compiler\nsupport for newer language features and C++ standards. In 2015, we settled\non C++11 features, and later never touched based on C++14 or C++17. \n\n\nRefactoring functions and algorithms to a new C++ standard would have\nrequired too much effort, and help explaining the differences was needed.\nBack then, we researched through books, online search, Stack Overflow, and\nsometimes tools assistance for refactoring.\n\n\nThings have changed with AI-powered assistants and GitLab Duo: You can\ninstruct Code Suggestions and Chat to provide source code in a specific C++\nstandard. Then, it is also possible to compare the different outputs. \n\n\nThe following example uses a prompt to specifically generate source code for\nthe C++03 standard. 
It implements a simple config parser for the `.ini`\nconfiguration format.\n\n\n```c++\n\n/* Create a config parser which can parse an ini format\n\nUse C++03 standard only\n */\n\n#include \u003Ciostream>\n\n#include \u003Cfstream>\n\n#include \u003Cstring>\n\n#include \u003Cmap>\n\n#include \u003Csstream>\n\n\nclass ConfigParser {\n\nprivate:\n    std::map\u003Cstd::string, std::map\u003Cstd::string, std::string> > config;\n\npublic:\n    ConfigParser() {}\n\n    bool parse(const std::string& filename) {\n        std::ifstream file(filename.c_str());\n        if (!file.is_open()) {\n            return false;\n        }\n\n        std::string line;\n        std::string currentSection;\n\n        while (std::getline(file, line)) {\n            // Trim whitespace\n            size_t first = line.find_first_not_of(\" \\t\");\n            size_t last = line.find_last_not_of(\" \\t\");\n            if (first == std::string::npos) continue; // Skip empty lines\n            line = line.substr(first, (last - first + 1));\n\n            if (line[0] == '[' && line[line.length() - 1] == ']') {\n                // This is a section\n                currentSection = line.substr(1, line.length() - 2);\n            } else {\n                // This is a key-value pair\n                size_t delimiterPos = line.find('=');\n                if (delimiterPos != std::string::npos) {\n                    std::string key = line.substr(0, delimiterPos);\n                    std::string value = line.substr(delimiterPos + 1);\n                    config[currentSection][key] = value;\n                }\n            }\n        }\n\n        file.close();\n        return true;\n    }\n\n    std::string getValue(const std::string& section, const std::string& key) const {\n        std::map\u003Cstd::string, std::map\u003Cstd::string, std::string> >::const_iterator sectionIt = config.find(section);\n        if (sectionIt != config.end()) {\n            std::map\u003Cstd::string, 
std::string>::const_iterator keyIt = sectionIt->second.find(key);\n            if (keyIt != sectionIt->second.end()) {\n                return keyIt->second;\n            }\n        }\n        return \"\";\n    }\n\n    void printConfig() const {\n        std::map\u003Cstd::string, std::map\u003Cstd::string, std::string> >::const_iterator sectionIt;\n        for (sectionIt = config.begin(); sectionIt != config.end(); ++sectionIt) {\n            std::cout \u003C\u003C \"[\" \u003C\u003C sectionIt->first \u003C\u003C \"]\" \u003C\u003C std::endl;\n            std::map\u003Cstd::string, std::string>::const_iterator keyIt;\n            for (keyIt = sectionIt->second.begin(); keyIt != sectionIt->second.end(); ++keyIt) {\n                std::cout \u003C\u003C keyIt->first \u003C\u003C \" = \" \u003C\u003C keyIt->second \u003C\u003C std::endl;\n            }\n            std::cout \u003C\u003C std::endl;\n        }\n    }\n};\n\n\nint main() {\n    ConfigParser parser;\n    if (parser.parse(\"config.ini\")) {\n        std::cout \u003C\u003C \"Config file parsed successfully.\" \u003C\u003C std::endl;\n        parser.printConfig();\n\n        std::cout \u003C\u003C \"Value of 'key1' in 'section1': \" \u003C\u003C parser.getValue(\"section1\", \"key1\") \u003C\u003C std::endl;\n    } else {\n        std::cout \u003C\u003C \"Failed to parse config file.\" \u003C\u003C std::endl;\n    }\n    return 0;\n}\n\n```\n\n\nThe same prompts can be applied to [explaining\ncode](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#explain-code-in-the-ide),\n[generating\ntests](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#write-tests-in-the-ide),\nand even [refactoring\ncode](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#refactor-code-in-the-ide):\n`/explain focus on C++03 features`, `/tests for C++03 features`, `/refactor\ninto C++03`. 
\n\n\nThe following C++ standards have been tested working with GitLab Duo Code\nSuggestions:\n\n\n- C++03\n\n- C++11\n\n- C++14\n\n- C++17\n\n- C++20\n\n- C++23\n\n\nYou can find the source code in the [Challenge - Refactor Cpp Standards\nConfig Parser\nproject](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-cpp-standards-config-parser). \n\n\nThere are many different combinations to refactor source code between the\nC++ standards. The following examples reflect on modernizing the C++ code\nwith version upgrades, but also test a downgrade where ambitious developers\nwanted to use C++20 but had to use C++11 to support enterprise customer\nenvironments.\n\n\n#### Migration: Refactor C++03 into C++14 \n\n\nThe goal is to modernize the code, and understand the changes between C++03\nand C++14. Open the\n[03.cpp](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-cpp-standards-config-parser/-/blob/main/03.cpp?ref_type=heads)\nsource code in the IDE, select it, and use the following Chat prompt: \n\n\n```markdown\n\n/refactor into C++14 and explain the changes\n\n```\n\n\nPotential Chat response: \n\n\n```markdown\n\n…\n\n\nExplanations of the changes:\n\n\n1. Default constructor: Changed `ConfigParser() {}` to `ConfigParser() =\ndefault;` to use the default constructor.\n\n2. File opening: Removed `.c_str()` when opening the file, as C++11 and\nlater support `std::string` directly.\n\n3. Lambda functions: Introduced a lambda function trim to handle string\ntrimming, making the code more readable and reusable.\n\n4. Auto keyword: Used `auto` for type deduction in various places, improving\ncode readability.\n\n5. Range-based for loops: In the `printConfig` method, replaced traditional\nfor loops with range-based for loops, which are more concise and less\nerror-prone.\n\n6. 
Structured bindings: Used structured bindings in the range-based for\nloops to directly access key-value pairs.\n\n7. If statements with initializers: In the `getValue` method, used if\nstatements with initializers to combine the find operation and the check in\na single line.\n\n8. String member functions: Used `front()` and `back()` instead of\narray-style access for the first and last characters of strings.\n\n9. Uniform initialization: Used uniform initialization `{}` instead of\nparentheses `()` for object creation.\n\n10. Removed `std::endl`: Replaced `std::endl` with `'\\n'` for better\nperformance, as `std::endl` forces a flush of the output buffer.\n\n\nThese changes make the code more modern, concise, and potentially more\nefficient while maintaining the same functionality.\n\n\n```\n\n\n![Changes reflected in\ncode](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/duo_refactor_cpp_clion_chat_cpp03_to_14_01.png)\n\n\n![Changes reflected in code\n2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675060/Blog/Content%20Images/duo_refactor_cpp_clion_chat_cpp03_to_14_02.png)\n\n\n#### Downgrade: Refactor C++23 to C++11\n\n\nSometimes, developers started a new feature proof-of-concept in a modern C++\nstandard like C++20. The actual supported versions for enterprise customer\nenvironments require older compilers with C++11 support. 
Downgrading the\nversions come at a cost, and require evaluating which language features are\nstill supported, and which other disadvantages could come across.\n\n\nOpen the\n[23.cpp](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-cpp-standards-config-parser/-/blob/main/23.cpp?ref_type=heads)\nsource code in the IDE, select it, and use the following Chat prompt: \n\n\n```markdown\n\n/refactor into C++11, explain the necessary changes and potential problems\n\n```\n\n\nPotential Duo Chat response: \n\n\n```markdown\n\nThe main changes required to make this code compatible with C++11 are:\n\n\n1. Remove the C++23 standard comment, as it's no longer applicable.\n\n2. Replace `std::string::find_first_not_of` and\n`std::string::find_last_not_of` with C++11 compatible alternatives in the\ntrim function.\n\n3. Use `const auto&` instead of `auto` for loop variables to avoid\nunnecessary copying.\n\n\nPotential problems in the original code:\n\n\n1. The original code used C++23 features, which might not be widely\nsupported yet.\n\n2. The `trim` function used `std::string::find_first_not_of` and\n`std::string::find_last_not_of`, which are not available in C++11.\n\n3. The use of `auto` instead of `const auto&` in some loops could lead to\nunnecessary copying of complex objects.\n\n\nThese issues have been addressed in the refactored version. The `trim`\nfunction now uses `std::find_if_not` with lambda functions to achieve the\nsame result in a C++11 compatible way. 
The use of `const auto&` ensures that\nno unnecessary copying occurs when iterating over the config map.\n\n```\n\n\n![The `trim` function now uses `std::find_if_not` with lambda functions to\nachieve the same result in a C++11 compatible\nway](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/duo_refactor_cpp_clion_chat_cpp23_to_11_01.png)\n\n\n![The use of `const auto&` ensures that no unnecessary copying occurs when\niterating over the config\nmap](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/duo_refactor_cpp_clion_chat_cpp23_to_11_02.png)\n\n\n**Async practice**: Test more version refactoring scenarios.\n\n\n### Explain and refactor COBOL\n\n\nYou can use GitLab Duo to explain the source code, analyze, fix and refactor\nfor COBOL programs. I have never written nor learned COBOL, and found this\nhelpful [COBOL Programming\nCourse](https://github.com/openmainframeproject/cobol-programming-course)\nwith many examples.\n\n\nI then asked Chat how to get started with COBOL, create a COBOL program, and\ncompile a COBOL program on macOS.\n\n\n```markdown\n\nPlease explain what COBOL is and its syntax\n\n\nPlease create a COBOL program that shows the first steps\n\n\nTell me more about the COBOL compiler. Which system do I need? Can I do it\non my macOS?\n\n\n```\n\n\n![Asking GitLab Duo Chat to explain and its\nsyntax](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/vscode_chat_cobol_generate_example.png)\n\n\nOpen a COBOL program, select the source code, switch to Duo Chat and send\nthe `/explain` prompt to explain purpose and functionality.\n\n\nYou can also refine the prompts to get more high-level summaries, for\nexample:\n\n\n```markdown \n\n/explain like I am five\n\n```\n\n\n> Tip: Programming languages share similar algorithms and functionality. 
For\nCOBOL, Chat offered to explain it using Python, and, therefore, I adjusted\nfuture prompts to ask for an explanation in Python.\n\n\n```markdown\n\n/explain in a different programming language\n\n```\n\n\nYou can also use the `/refactor` slash command prompt in Chat to improve the\ncode quality, fix potential problems, and try to refactor COBOL into Python.\n\n\n```markdown\n\n/refactor fix the environment error\n\n\n/refactor fix potential problems\n\n\n/refactor into Python\n\n```\n\n\nThe [GitLab Duo Coffee Chat - Challenge: Explain and Refactor COBOL\nprograms](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-explain-refactor-cobol-program)\nrecording shows all discussed steps in a practical use case, including how\nto find a missing period: \n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/pwlDmLQMMPo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## Refactor a language into another language\n\n\nModernization and code quality improvements sometimes require the change of\na programming language. Similar refactor prompts with GitLab Duo can help\nspeed up the migration process. The COBOL example with Python is just one of\nmany requirements in enterprise environments -- let's dive into more use\ncases.\n\n\n### Refactor C to Rust \n\n\nIn early 2024, several programming languages, like C, have been called out\nfor not being memory safe. The recommendations for future projects include\n[memory safe\nlanguages](https://about.gitlab.com/blog/memory-safe-vs-unsafe/)\nlike Rust. But how do you start a migration, and what are the challenges?\n\n\nLet's try it with a simple example in C. The code was generated using Code\nSuggestions and should print the basic operating system information, like\nthe name, version, and platform. 
The C code compiles cross-platform on\nWindows, Linux, and macOS.\n\n\n```c\n\n// Read OS files to identify the platform, name, versions\n\n// Print them on the terminal\n\n#include \u003Cstdio.h>\n\n#include \u003Cstdlib.h>\n\n#include \u003Cstring.h>\n\n\n#ifdef _WIN32\n    #include \u003Cwindows.h>\n#elif __APPLE__\n    #include \u003Csys/utsname.h>\n#else\n    #include \u003Csys/utsname.h>\n#endif\n\n\nvoid get_os_info() {\n    #ifdef _WIN32\n        OSVERSIONINFOEX info;\n        ZeroMemory(&info, sizeof(OSVERSIONINFOEX));\n        info.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);\n        GetVersionEx((OSVERSIONINFO*)&info);\n\n        printf(\"Platform: Windows\\n\");\n        printf(\"Version: %d.%d\\n\", info.dwMajorVersion, info.dwMinorVersion);\n        printf(\"Build: %d\\n\", info.dwBuildNumber);\n    #elif __APPLE__\n        struct utsname sys_info;\n        uname(&sys_info);\n\n        printf(\"Platform: macOS\\n\");\n        printf(\"Name: %s\\n\", sys_info.sysname);\n        printf(\"Version: %s\\n\", sys_info.release);\n    #else\n        struct utsname sys_info;\n        uname(&sys_info);\n\n        printf(\"Platform: %s\\n\", sys_info.sysname);\n        printf(\"Name: %s\\n\", sys_info.nodename);\n        printf(\"Version: %s\\n\", sys_info.release);\n    #endif\n}\n\n\nint main() {\n    get_os_info();\n    return 0;\n}\n\n```\n\n\nOpen the source code in\n[`os.c`](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-c-to-rust/-/blob/897bf57a14bb7be07d842e7f044f93a61456d611/c/os.c)\nin JetBrains CLion, for example. Select the source code and use the Chat\nprompt `/explain` to explain purpose and functionality. Next, use\n`/refactor` in the Chat prompt to refactor the C code, and then take it one\nstep further: `/refactor into Rust`. \n\n\nInitialize a new Rust project (Tip: Ask Duo Chat), and copy the generated\nsource code into the `src/main.rs` file. 
Run `cargo build` to compile the\ncode. \n\n\n![Initialize a new Rust project, and copy the generated source code into the\n`src/main.rs` file. Run `cargo build` to compile the\ncode.](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/jetbrains_clion_c_rust.png)\n\n\nIn the [GitLab Duo Coffee Chat: Challenge - Refactor C into\nRust](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-c-to-rust)\nrecording, you can learn all steps, and additionally, you'll see a\ncompilation error which gets fixed with the help of Chat and `/refactor`\nslash command. The session also shows how to improve the maintanability of\nthe new Rust code by adding more error handling. \n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/nf8g2ucqvkI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n### Refactor Perl to Python \n\n\nThat one script that runs on production servers, does its job, the author\nleft the company ten years ago, and nobody wants to touch it. The problem\nmight also apply to multiple scripts, or even a whole application. A\ndecision was made to migrate everything to modern Python 3, with the goal to\nmodernize the code, and understand the changes between Perl and Python.\n\n\nA customer recently asked in a GitLab Duo workshop whether a direct\nmigration is possible using GitLab Duo. Short answer: Yes, it is. 
Longer\nanswer: You can use refined Chat prompts to refactor Perl code into Python,\nsimilar to other examples in this article.\n\n\nOpen the `script.pl` source code in IDE, select it, and open Chat.\n\n\n```perl\n\n#!/usr/bin/perl\n\nuse strict;\n\nuse warnings;\n\n\nopen my $md_fh, '\u003C', 'file.md' or die \"Could not open file.md: $!\";\n\n\nmy $l = 0;\n\nmy $e = 0;\n\nmy $h = 0;\n\n\nwhile (my $line = \u003C$md_fh>) {\n  $l++;\n  if ($line =~ /^\\s*$/) {\n    $e++;\n    next;\n  }\n  if ($line =~ /^#+\\s*(.+)/) {\n    print \"$1\\n\";\n    $h++; \n  }\n}\n\n\nprint \"\\nS:\\n\"; \n\nprint \"L: $l\\n\";\n\nprint \"E: $e\\n\"; \n\nprint \"H: $h\\n\";\n\n```\n\n\nYou can use the following prompts to:\n\n\n1. `/explain` its purpose, and `/refactor` to improve the code.\n\n2. `/refactor into Python` to get a working Python script.\n\n\n![Refactor into\nPython](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/pycharm_duo_refactor_perl_python.png)\n\n\n> Tip: You can refactor Perl code into more target languages. The [GitLab\nDuo Coffee Chat: Challenge - Refactor Perl to\nPython](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-perl-python)\nrecording shows PHP, Ruby, Rust, Go, Java, VB.NET, C#, and more.\n\n> \n\n> If you want to continue using Perl scripts, you can configure [Perl as\nadditional\nlanguage](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html#add-support-for-more-languages)\nin Duo Code Suggestions. 
Chat already understands Perl and can help with\nquestions and slash command prompts, as you can see in the following\nrecording.\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/03HGhxXg9lw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## More Refactoring Hands-on \n\n\n### Refactor JavaScript \n\n\nEddie Jaoude shows how to refactor JavaScript to improve code quality or add\nfunctionality in a practical example. \n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/mHn8KOzpPNY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n### Refactor Bash to ZSH or SH\n\n\nI have used Bash as a shell for 20 years and most recently switched to ZSH\non macOS. This resulted in script not working, or unknown errors in my\nterminal. Another use case for refactoring are shell limitations – some\noperating systems or Linux/Unix distributions do not provide Bash, only SH,\nfor example, Alpine.\n\n\n![Refactor shell\nscripts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/intellj_refactor_shell_scripts.png)\n\n\nThe [GitLab Duo Coffee Chat: Challenge - Refactor Shell\nScripts](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-shell-scripts)\nshows an example with a C program that can tail syslog files, and a build\nscript written in Bash. Throughout the challenge, Chat is queried with\n`/explain` and `/refactor` prompts to improve the code. It is also possible\nto refactor Bash into POSIX-compliant SH or ZSH. The session concludes with\nasking Chat to provide five different Shell script implementations, and\nexplain the key summaries. 
\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/mssqYjlKGzU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n### More use cases and tutorials\n\n\n- [Documentation: GitLab Duo use\ncases](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html)\n\n- [Tutorial: Top tips for efficient AI-powered code suggestions with GitLab\nDuo](https://about.gitlab.com/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo/)\n\n- [Tutorial: 10 best practices for using AI-powered GitLab Duo\nChat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/)\n\n\n## Key takeaways \n\n\n1. GitLab Duo provides efficient help with explaining and refactoring code. \n\n1. You can refactor code between language standards, and ask follow-up\nquestions in Chat.\n\n1. Code Suggestions prompts can generate specific language standards, and\ncode completion respects the current code context. \n\n1. Refactoring code into new programming languages helps with longer term\nmigration and modernization plans.\n\n1. Code can be \"downgraded\" into older system's supported language\nstandards.\n\n1. GitLab Duo can explain complex code and programming languages with\ndifferent programming language examples.\n\n1. The update to Anthropic Claude 3.5 on GitLab.com has improved the quality\nand speed of Code Suggestions and Chat once again (self-managed upgrade to\n17.3 recommended).\n\n1. 
There are no boundaries except your imagination, and production pain\npoints.\n\n\nLearn more about efficient Code Suggestions and Chat workflows, and start\nyour AI-powered code refactoring journey with GitLab Duo today!\n\n\n> [Start your free trial of GitLab\nDuo!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro_)\n",[9,737,716],{"slug":2793,"featured":6,"template":693},"refactor-code-into-modern-languages-with-ai-powered-gitlab-duo","content:en-us:blog:refactor-code-into-modern-languages-with-ai-powered-gitlab-duo.yml","Refactor Code Into Modern Languages With Ai Powered Gitlab Duo","en-us/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo.yml","en-us/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo",{"_path":2799,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2800,"content":2805,"config":2811,"_id":2813,"_type":14,"title":2814,"_source":16,"_file":2815,"_stem":2816,"_extension":19},"/en-us/blog/refactoring-javascript-to-typescript-with-gitlab-duo-workflow",{"title":2801,"description":2802,"ogTitle":2801,"ogDescription":2802,"noIndex":6,"ogImage":1965,"ogUrl":2803,"ogSiteName":706,"ogType":707,"canonicalUrls":2803,"schema":2804},"Refactoring JavaScript to TypeScript with GitLab Duo Workflow","Learn how we used our autonomous AI agent, which sits in your development environment, to convert a real-world JavaScript application to TypeScript.","https://about.gitlab.com/blog/refactoring-javascript-to-typescript-with-gitlab-duo-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Refactoring JavaScript to TypeScript with GitLab Duo Workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Frédéric Caplette\"}],\n        \"datePublished\": \"2025-05-22\",\n      
}",{"title":2801,"description":2802,"authors":2806,"heroImage":1965,"date":2808,"body":2809,"category":10,"tags":2810},[2807],"Frédéric Caplette","2025-05-22","TypeScript adoption continues to grow, with over 88% of developers reporting\nthey either use or want to use it. Yet, migrating existing JavaScript\ncodebases to TypeScript is often a time-consuming process. Enter [GitLab Duo\nWorkflow](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/):\nsecure, agentic AI that sits right inside your development environment,\nhelping transform high-level tasks into executable workflows. In this\narticle, you'll learn how we used Duo Workflow to update Duo Workflow,\nconverting a real-world JavaScript application to TypeScript. We'll also\nreview the technical process and broader implications for development\nworkflows.\n\n\nThis video walks through visually what you'll read below:\n\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe\nsrc=\"https://player.vimeo.com/video/1085078036?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\"\nframeborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture;\nclipboard-write; encrypted-media\"\nstyle=\"position:absolute;top:0;left:0;width:100%;height:100%;\"\ntitle=\"Refactor JavaScript to TypeScript with GitLab Duo\nWorkflow\">\u003C/iframe>\u003C/div>\u003Cscript\nsrc=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n\n## The challenge: Refactor JS to TS\n\n\nWe decided to migrate Duo Workflow client-related logic to TypeScript for\nbetter type safety and auto-complete. A JavaScript-to-TypeScript migration\ninvolves more than just changing file extensions. It requires:\n\n\n1. Analyzing existing code patterns to determine appropriate types\n\n2. Handling edge cases where type inference is ambiguous\n\n3. Ensuring consistency across the codebase\n\n4. Managing dependencies and third-party libraries\n\n5. 
Validating that runtime behavior remains unchanged\n\n\nDoing all of this work manually can be very time consuming and not much fun.\nThankfully, managing your projects is easier and more efficient with Duo\nWorkflow – even when the project is Duo Workflow.\n\n\n## How it works\n\n\nIf you've used AI coding assistants before, you're likely familiar with\ntheir chat-based interfaces (like [GitLab Duo\nChat](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available/)).\nThese chats can provide code snippets, suggested directory structures, and\nmore. GitLab Duo Workflow fundamentally differs from these in several key\nways:\n\n\n1. It's contextually aware of your entire codebase, not just open files or\nhighlighted text. You can also add context from relevant GitLab issues or\nmerge requests.\n\n2. It develops and executes a multi-stage plan, allowing for more complete\nsolutions.\n\n3. It keeps a human in the loop, ensuring you understand and approve of what\nis happening at each step of the way.\n\n4. It stages changes to your repository, giving you control over what to\naccept.\n\n\nDuo Workflow uses an agentic AI approach, powered by Claude 3.5 Sonnet, to\nunderstand code semantics while maintaining a deep integration with GitLab's\nDevOps platform.\n\n\nLet's walk through a real-world example of refactoring a JavaScript file\nusing GitLab Duo Workflow. 
In our case, we started with the prompt:\n\n\n```unset\n\nI need to refactor a JavaScript file\npackages/webview_duo_workflow/src/app/stores/workflow.js to TypeScript.\nPlease analyze the code, identify any issues, and convert it to TypeScript\nwith appropriate type definitions.\n\n```\n\n\n### Analysis\n\n\nAfter prompting, Duo Workflow first analyzed the codebase structure:\n\n\n* Identified that `workflow.js` is a Pinia store implementation\n\n* Noted it uses imports from various modules including `constants.ts` (which\nwas already in TypeScript)\n\n* Recognized GraphQL queries import exists but the file wasn't accessible\n\n\nBased on this analysis, Duo Workflow identified specific typing needs:\n\n\n**Type definitions needed for:**\n\n\n* Store State interface with precise types:\n  * `areWorkflowsLoading`: boolean\n  * `isLoadingWorkflow`: boolean\n  * `isLoadingRecentWorkflows`: boolean\n  * `recentWorkflows`: array\n  * `workflows`: array\n  * `workflowsPageInfo`: object\n  * `initialState`: object\n  * `activeWorkflow`: object with specific shape\n\n**Interface definitions required:**\n\n\n* Workflow interface with properties:\n  * `id`: string\n  * `goal`: string\n  * `status`: string\n  * `checkpoint`: object\n  * `type`: string\n\n**Parameter types needed for:**\n\n\n* GraphQL response types for workflows data\n\n* Event handler parameter types\n\n* Function parameter types for actions like `getWorkflowById`,\n`runWorkflow`, etc.\n\n\nDuo Workflow identified several complex patterns requiring special handling:\n\n\n* Uses optional chaining extensively\n\n* Has nested object destructuring\n\n* Contains GraphQL integration\n\n* Uses Pinia store patterns with state, getters, and actions\n\n* Implements an events mapping system\n\n\nThe system recognized external type dependencies:\n\n\n* Pinia store types\n\n* GraphQL query response types\n\n* Functions from an internal Bridge module were also properly recognized\n(for `sendRequest`, 
`sendGraphqlRequest`)\n\n\nThe final analysis concluded: \"The file will need to be renamed from .js to\n.ts and appropriate type definitions added while maintaining the existing\nfunctionality.\"\n\n\n### Planning\n\n\nBased on this thorough analysis, Duo Workflow created a structured plan\nconsisting of specific tasks:\n\n\n1. **Create a new TypeScript file** `workflow.ts` in the same directory as\nthe original JS file.\n\n\n2. **Create TypeScript interfaces** for the store state and workflow types\nat the top of the new file, including:\n\n   * WorkflowState interface for store state\n   * Workflow interface for workflow object structure\n   * WorkflowPageInfo interface for pagination info\n   * WorkflowCheckpoint interface for checkpoint data\n\n3. **Copy and add type annotations** to the store implementation from\n`workflow.js`, including:\n\n   * State return type\n   * Getter return types\n   * Action parameter types\n   * Action return types\n\n4. **Update all imports** in `workflow.ts` to:\n\n   * Add type imports from Pinia\n   * Update local imports to use .ts extensions where applicable\n   * Import any required type definitions\n\n### Execution\n\n\nAfter the plan is complete, we were prompted to  “Approve plan.” Before\nclicking approve, we reviewed each step and ensured we were comfortable with\nthe plan. After approval, Duo Workflow showed its progress through each step\nwith visual indicators and explanations of what API operations were\nsupporting each task (like \"Supported by: create_file_with_contents\" or\n\"Supported by: edit\\_file\"). When the work was done, we reviewed the changes\nbefore committing.\n\n\n## What we learned\n\n\nThis JavaScript-to-TypeScript migration example showcases a significant\nevolution in AI-assisted development. What makes GitLab Duo Workflow\nparticularly interesting is its approach to:\n\n\n### Task-oriented programming vs. 
suggestion-only assistance\n\n\nUnlike many AI assistants that simply offer code snippets or suggestions,\nDuo Workflow understands and executes complete tasks. The difference is\nsignificant — rather than saying \"here's some TypeScript code you might\nuse,\" it says \"I'll convert this file for you, here's my plan, and here are\nthe changes I'm making.\"\n\n\n### Contextual understanding of the entire codebase\n\n\nThe tool demonstrates awareness of project structure, related files (like\nconstants.ts and GraphQL queries), and the relationships between components.\nThis contextual understanding allows for more sophisticated conversions than\nlocalized transformations.\n\n\n### Step-by-step execution with visibility\n\n\nThe plan-based approach, with clear steps and progress indicators, provides\ntransparency into what would otherwise be a black-box process. This allows\ndevelopers to understand what the AI is doing and how it's approaching the\nproblem.\n\n\n> GitLab Duo Workflow is currently available in private beta for GitLab\nUltimate customers. 
[Sign up for the waitlist\ntoday!](https://about.gitlab.com/gitlab-duo/agent-platform/)\n\n\n## Learn more\n\n\n- [Agentic AI guides and\nresources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)\n\n- [GitLab Duo\nWorkflow](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/)\n\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n",[9,737,496,717],{"slug":2812,"featured":91,"template":693},"refactoring-javascript-to-typescript-with-gitlab-duo-workflow","content:en-us:blog:refactoring-javascript-to-typescript-with-gitlab-duo-workflow.yml","Refactoring Javascript To Typescript With Gitlab Duo Workflow","en-us/blog/refactoring-javascript-to-typescript-with-gitlab-duo-workflow.yml","en-us/blog/refactoring-javascript-to-typescript-with-gitlab-duo-workflow",{"_path":2818,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2819,"content":2825,"config":2830,"_id":2832,"_type":14,"title":2833,"_source":16,"_file":2834,"_stem":2835,"_extension":19},"/en-us/blog/remediating-vulnerabilities-with-insights-and-ai",{"title":2820,"description":2821,"ogTitle":2820,"ogDescription":2821,"noIndex":6,"ogImage":2822,"ogUrl":2823,"ogSiteName":706,"ogType":707,"canonicalUrls":2823,"schema":2824},"Remediating vulnerabilities with GitLab's security insights and AI","Learn how to leverage vulnerability insights and the Explain this Vulnerability AI feature to not only resolve a vulnerability, but also understand it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662877/Blog/Hero%20Images/security-cover-new.png","https://about.gitlab.com/blog/remediating-vulnerabilities-with-insights-and-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Remediating vulnerabilities with GitLab's security insights and AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": 
\"2023-08-31\"\n      }",{"title":2820,"description":2821,"authors":2826,"heroImage":2822,"date":2827,"body":2828,"category":10,"tags":2829},[1436],"2023-08-31","We recently introduced [GitLab Duo](https://about.gitlab.com/gitlab-duo/), a\ncomplete suite of AI capabilities to power your DevSecOps workflows. GitLab\nDuo's AI features not only enable you to write secure code faster, but also\nenhance productivity by providing helpful explanations and insights into\nyour code. For instance, you can harness the power of AI to prevent security\nbreaches. In this tutorial, we will go over the Explain this Vulnerability\nAI feature, which is in beta, and how it can be used with vulnerability\ninsights to remediate vulnerabilities.\n\n\nYou will learn the following:\n\n* How the Explain this Vulnerability AI feature works\n\n* Prerequisites for Explain this Vulnerability and other GitLab AI features\n\n* How GitLab Vulnerability Insights assists in remediation\n\n* How to remediate a SQL-injection vulnerability using GitLab's\nvulnerability insights and Explain this Vulnerability\n\n* Additional GitLab AI capabilities (GitLab Duo currently requires\nconnectivity to access Google large language models (LLMs), however, there\nare plans to expand these features to limited-connectivity environments)\n\n\nSee the following video for a quick overview of Vulnerability Insights + AI\n\"Explain this Vulnerability\". 
\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/1UagZx_CUks\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\nYou can also see a detailed walkthrough of [Leveraging GitLab Vulnerability\nInsights + AI to Remediate a\nSQL-Injection](https://youtu.be/EJXAIzXNAWQ?feature=shared) in the [Solving\na SQL injection using vulnerability insights and\nAI](#solving-a-sql-injection-using-vulnerability-insights-and-ai) section\nbelow.\n\n\n## What is the Explain this Vulnerability AI feature?\n\nThe [Explain this\nVulnerability](https://docs.gitlab.com/ee/user/ai_features.html#explain-this-vulnerability-in-the-web-ui)\nfeature\n\nleverages an LLM powered by Google AI to assist you in securing your\napplication by:\n\n* Summarizing detected vulnerabilities\n\n* Helping developers and security analysts understand the vulnerability and\nits implications\n\n* Showing how a vulnerability can be exploited with detailed example code\n\n* Providing in-depth solutions to the vulnerability\n\n* Providing suggested mitigation along with sample code tuned toward your\nproject's programming language\n\n\nTo begin using Explain this Vulnerability, you must have the following\nprerequisites configured:\n\n\n* [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/) SaaS\nsubscription\n\n* [Experiment features\nenabled](https://docs.gitlab.com/ee/user/group/manage.html#enable-experiment-features)\n\n* [Third-party AI features\nenabled](https://docs.gitlab.com/ee/user/group/manage.html#enable-third-party-ai-features)\n\n* Static application security testing\n([SAST](https://docs.gitlab.com/ee/user/application_security/sast/))\nvulnerability finding in the default branch of a project\n\n* [Maintainer](https://docs.gitlab.com/ee/user/permissions.html) or greater\nrole in the vulnerable project \n\n* [SAST 
scanner](https://docs.gitlab.com/ee/user/application_security/sast/)\nenabled in the vulnerable project\n\n* An active internet connection\n\n\nOnce the prerequisites have been configured, to begin using Explain this\nVulnerability, perform the following steps:  \n\n\n1) Navigate to the [Vulnerability\nReport](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/).  \n\n2) Find a SAST vulnerability finding.  \n\n3) Scroll to the bottom of the [vulnerability\npage](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/).  \n\n4) Press the **Try it out** button in \"Explain this Vulnerability and how to\nmitigate it with AI\" section.  \n\n\n![View of the \"Try it out\" button at bottom of\nscreen](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/ai_explain_this_vulnerability_try_it_out_dialog.png)\n\n\nOnce you click the button, GitLab will begin to generate the following:\n\n* **What is the vulnerability?**: Details on the vulnerability and how it\nmay affect your application\n\n* **How can an attacker take advantage of the vulnerability?**: Commands\nthat a malicious actor can use to exploit the vulnerability\n\n* **How can the vulnerability be fixed?**: Details on how the vulnerability\ncan be remediated\n\n* **Example of vulnerable code**: The actual vulnerable code in the language\nof your application\n\n* **Example of fixed code**: Code showing a fix that should be applied to\nremediate the vulnerability in the language of your application\n\n* **References**: Links providing details relevant to the vulnerability\n\n* **User rating request**: Allows for user input, which is used to improve\nthe model\n\n\n![AI response depicting the above\nlist](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/ai_explain_this_vulnerability_results.png)\n\n\nThis information can be used together with vulnerability insights to resolve\nthe 
vulnerability. Now let's discuss vulnerability insights.\n\n\n## Vulnerability insights\n\nVulnerability insights provide detailed information on a vulnerability and\nhow to resolve it. This detailed information\n\nincludes:\n\n\n* **Description**: A detailed description of the vulnerability and its\nimplications\n\n* **Severity**: The severity of the vulnerability based on the [CVSS\nrating](https://nvd.nist.gov/vuln-metrics/cvss)\n\n* **Project**: The project where the vulnerability was found\n\n* **Tool**: The type of scanner that found the vulnerability\n\n* **Scanner**: The specific name of the scanner that found the vulnerability\n\n* **Location**: The line of code where the vulnerability is present\n\n* **Identifiers**: Links that identify and provide additional information on\nthe vulnerability such as the CVE/CWE page\n\n* **Training**: Security training available from our partners to educate\ndevelopers on the vulnerability\n\n* **Solution**: Information on how to remediate the vulnerability\n\n* **Method**: The [REST API\nmethod](https://www.w3schools.in/restful-web-services/rest-methods) used to\nexploit the vulnerability (dynamic scanners only)\n\n* **URL**: The URL in which the vulnerability was detected (dynamic scanners\nonly)\n\n* **Request/response**: The request sent and response received when\nexploiting the vulnerability (dynamic scanners only)\n\n\n**Note**: Results may vary depending on the scanner used.\n\n\nHaving all this information not only allows you to resolve a vulnerability\nwith ease but also enhances your security\n\nknowledge. All these insights are provided as a single source of truth that\nboth developer and security teams can view and\n\ntake action on asynchronously.\n\n\nDevelopers can leverage insights within a merge request (MR). The MR\ninsights show the vulnerabilities in the diff\n\nbetween a feature branch and the branch you are merging into. 
This allows\nyou to continuously iterate until you have resolved\n\na vulnerability and then alert security engineers when approval is required,\ngiving developers the power to resolve\n\nvulnerabilities themselves.\n\n\n![MR insights\nsample](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/vulnerability_insights_mr_view.png)\n\n\nThe security team can leverage insights via the [vulnerability\nreport](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/).\nThe vulnerability report shows vulnerabilities present in the `default`\nbranch, which is typically linked to production. From here, the security\nteam can collaborate on a resolution as well as triage and manage\nvulnerabilities.\n\n\n![Vulnerability report\nsample](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/vulnerability_insights_vulnerability_report.png)\n\n\n**Note**: Currently, the Explain this Vulnerability feature can only be seen\nin the Vulnerability Report view. It is currently\n\nbeing considered for the MR view, see [future iterations under\nconsideration](https://gitlab.com/groups/gitlab-org/-/epics/10284#future-iterations-under-consideration)\nfor more information.\n\n\n## Solving a SQL injection using vulnerability insights and AI\n\nBy leveraging both vulnerability insights and Explain this Vulnerability, we\nhave all the resources necessary to\n\nnot only resolve a vulnerability but also understand it. Let's see how we\ncan use these features to [solve a SQL\ninjection](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/documentation/anatomy_of_a_vulnerability/). \n\n\nNow let's go over the steps to remediate a SQL injection. 
You can follow\nalong with the video:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/EJXAIzXNAWQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n**Privacy notice**: Explain this Vulnerability only uses `public repos` to\ntrain the LLM. Code in private repositories\n\nis not transferred to the LLM.\n\n\nI will be using the [Simple Notes\nproject](https://gitlab.com/gitlab-de/tutorials/security-and-governance/devsecops/simply-vulnerable-notes)\nto showcase this. You can set up DevSecOps within GitLab yourself by going\nover the following\n[tutorial](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/).\nAfter you have done so, you can run through the following:\n\n\n1) Navigate to **Secure > Vulnerability Report**.\n\n\n2) Sort by **SAST** under **Scanner**.\n\n\n3) Find and select a SQL injection vulnerability. 
A SQL injection will be\ntitled something like\n\n`Improper Neutralization of Special Elements used in an SQL Command ('SQL\nInjection')`.\n\n\n4) Examine the vulnerability insights.\n\n\n* **Description**: Detected possible formatted SQL query.\n\n* **Location**: File:\n[notes/db.py:100](https://gitlab.com/gitlab-de/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/-/blob/24ff1847aa70c4d51482fe28f019e3724b399aaf/notes/db.py#L100)\n\n* **Identifier**: [bandit.B608](https://semgrep.dev/r/gitlab.bandit.B608),\n[CWE-89](https://cwe.mitre.org/data/definitions/89.html)\n\n* **Solution**: Use parameterized queries instead.\n\n* **Training**: [Secure Code\nWarrior](https://portal.securecodewarrior.com/?utm_source=partner-integration:gitlab&partner_id=gitlab#/contextual-microlearning/web/injection/sql/python/vanilla),\n[SecureFlag](https://knowledge-base.secureflag.com/vulnerabilities/sql_injection/sql_injection_python.html),\nand\n[Kontra](https://application.security/gitlab/free-application-security-training/owasp-top-10-sql-injection)\n\n\n![SQL Injection Walkthrough -\nInsights](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/vulnerability_insights_vulnerability_report.png)\n\n\n5) Scroll down to the \"Explain this vulnerability and how to mitigate it\nwith AI\" section and click the **Try it out** button.\n\n\n**Privacy notice**: If the **Send code to prompt** radio button is selected,\nresponse quality is improved. 
However, the actual code is\n\nused in a query to the LLM (even in private repositories).\n\n\n![SQL Injection Walkthrough - AI \"Try it out\"\nbutton](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/ai_explain_this_vulnerability_try_it_out_dialog.png)\n\n\n6) Examine the provided AI solutions.\n\n\n![SQL Injection Walkthrough - AI\nresponse](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/ai_explain_this_vulnerability_results.png)\n\n\n7) Exploit the vulnerability.\n\nWe can use the information provided in the **AI response**, the samples in\nthe **vulnerability insight CWE identifier**,\n\nand the application's [API\nguide](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/documentation/api_guide/)\nto generate a malicious curl command as follows:\n\n\n```bash\n\n# A REGULAR API-CALL\n\n$ curl http://{LOAD_BALANCER_IP}/{APPLICATION_PATH}/api\n\n\n{\"Note\":\"[(1, 'cat'), (2, 'dog'), (3, 'frog'), (4, 'hog')]\"}\n\n\n# API CALL PASSING '1 or 1=1' AS SHOWN IN AI RESPONSE AND DETAILED IN\nIDENTIFIERS\n\n# NOTE: `1%20or%201%3D1` IS URL ENCODED '1 or 1=1'\n\n$ curl http://{LOAD_BALANCER_IP}/{APPLICATION_PATH}/api\\?id\\=1%20or%201%3D1\n\n\n{\"Note\":\"[(1, 'cat'), (2, 'dog'), (3, 'frog'), (4, 'hog'), (5, 'meow'), (6,\n'bark'), (7, 'ribbit'), (8, 'grunt')]\"}\n\n```\n\n\nThis shows us that we can exploit the SQL injection since we exposed data we\nshould not have access to.\n\nExploiting a vulnerability is not always as simple, so it is important to\ncombine resources as noted above\n\nto figure out exploitability.\n\n\n8) Determine a fix.\n\n\nNow that we know this is a problem within our system, we can use the\nprovided information to create a merge request (MR) to resolve\n\nand then test the MR in a non-production environment. 
Reviewing the\nvulnerability insights and AI response, we know we can solve this\n\nin a variety of ways. For example, we can:\n  \n* Use parameterized queries rather than directly calling the query\n\n* Sanitize the input before passing it to the `execute()` method\n\n\nTo enhance our knowledge, we should read\n[CWE-89](https://cwe.mitre.org/data/definitions/89.html) provided in the\nIdentifiers.\n\n\n9) Open the [GitLab\nWebIDE](https://docs.gitlab.com/ee/user/project/web_ide/) or editor of your\nchoice.\n\n\n10) Open the vulnerable file and scroll to the affected line of code. We\nfound this using the information provided in the insights.\n\n\n11) Apply the suggested change by reviewing the vulnerability insights and\nAI response. I changed the following:\n\n\n```python\n\ntry:\n  query = \"SELECT id, data FROM notes WHERE (secret IS FALSE AND id = %s)\" % id\n  if admin:\n    query =\"SELECT id, data, secret FROM notes WHERE (id = %s)\" % id\n  # NOT USING A PARAMETERIZED QUERY - SQL INJECTION CAN BE PASSED IN (,id)\n  cur.execute(query)\nexcept Exception as e:\n  note.logger.error(\"Error: cannot select note by id - %s\" % e)\n```\n\n\nto \n\n\n```python\n\ntry:\n  query = \"SELECT id, data FROM notes WHERE (secret IS FALSE AND id = %s)\"\n  if admin:\n    query =\"SELECT id, data, secret FROM notes WHERE (id = %s)\"\n  # USING A PARAMETERIZED QUERY - SQL INJECTION CANNOT BE PASSED IN (,id)\n  cur.execute(query, (id,))\nexcept Exception as e:\n  note.logger.error(\"Error: cannot select note by id - %s\" % e)\n```\n\n\nWe know this is the solution because parameterized queries as explained do\nnot allow actual SQL\n\ncommands to be run. Therefore, a SQL injection cannot be passed as the `id`.\nAdding a parameterized\n\nquery is easy since it is built into the Python db library we are using.\n\n\nThere may be multiple solutions to a vulnerability. It is up to the user to\ndecide what is best\n\nfor their application and workflow. 
The AI response provides a typical\nsolution, but more can be\n\nexamined and applied. For example, the AI response said we can add the\nfollowing:\n\n\n```python\n\ncur.execute(query.replace(\"'\", \"''\"))\n\n```\n\n\nThis would escape the single quotes in the input, making it safe to pass to\nthe `execute()` method.\n\nIt is a valid solution with less code required. However, I wanted to\nrestructure my code, so I applied\n\nanother solution found in the vulnerability insights.\n\n\n12) Create an MR with the fix. In my environment, feature branches are\nautomatically deployed\n\nto a new environment independent from production so we can test our features\nbefore merging them\n\nto production.\n\n\n13) Test the change in a non-production environment.\n\n\nOnce we push the MR, we can see if the vulnerability has been resolved and\nwe can test in a non-production\n\nenvironment:\n\n\n```bash\n\n# A REGULAR API-CALL\n\n$ curl http://{LOAD_BALANCER_IP}/{NEW_BRANCH_FIXED_APPLICATION_PATH}/api\n\n\n{\"Note\":\"[(1, 'cat'), (2, 'dog'), (3, 'frog'), (4, 'hog')]\"}\n\n\n# API CALL PASSING '1 or 1=1' AS SHOWN IN AI RESPONSE AND DETAILED IN\nIDENTIFIERS\n\n# NOTE: `1%20or%201%3D1` IS URL ENCODED '1 or 1=1'\n\n$ curl\nhttp://{LOAD_BALANCER_IP}/{NEW_BRANCH_FIXED_APPLICATION_PATH}/api\\?id\\=1%20or%201%3D1\n\n\n{\"Note\":\"[(1, 'cat')]\"}\n\n```\n\n\nWe can see that now the additional query parameters `or 1=1` are ignored and\nonly the first element\n\nis returned, meaning only the `1` was passed. 
We can further test if we can\nget item `5` which we should\n\nnot have access to:\n\n\n```bash\n\n# API CALL PASSING '5 or 1=1' AS SHOWN IN AI RESPONSE AND DETAILED IN\nIDENTIFIERS\n\n# NOTE: `5%20or%201%3D1` IS URL ENCODED '5 or 1=1'\n\n$ curl\nhttp://{LOAD_BALANCER_IP}/{NEW_BRANCH_FIXED_APPLICATION_PATH}/api\\?id\\=5%20or%201%3D1\n\n{\"Note\":\"[]\"}\n\n```\n\n\nSuccess, the SQL injection is no longer present!\n\n\n14) Merge into production.\n\n\nNow that we know the vulnerability has been resolved we can go ahead and\nmerge our fix! This is how you can use vulnerability insights\n\nto help resolve your vulnerabilities. If you wish to test all this for\nyourself, check out the complete [GitLab DevSecOps\ntutorial](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/).\n\n\n## Additional GitLab AI features\n\nAs we have seen above, Explain this Vulnerability assists you in remediating\nthe vulnerabilities within your\n\ndefault branch, but that's not the only AI feature GitLab has available!\nOther AI features to enhance your productivity include:\n\n\n* [Code\nSuggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html):\nEnables you to write code more efficiently by viewing code suggestions as\nyou type\n\n* [Suggested\nReviewers](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/#suggested-reviewers):\nHelps you receive faster and higher-quality reviews by automatically finding\nthe right people to review a merge request\n\n* [Value Stream\nForecasting](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html):\nPredicts productivity metrics and identifies anomalies across your software\ndevelopment lifecycle\n\n* [Summarize Issue\nComments](https://docs.gitlab.com/ee/user/ai_features.html#summarize-issue-discussions):\nQuickly gets everyone up to speed on lengthy conversations to ensure you are\nall on the same page\n\n* [Summarize Proposed Merge 
Request\nChanges](https://docs.gitlab.com/ee/user/ai_features.html#summarize-my-merge-request-review):\nHelps merge request authors drive alignment and action by efficiently\ncommunicating the impact of their changes\n\n* [Summarize Merge Request\nReview](https://docs.gitlab.com/ee/user/ai_features.html#summarize-merge-request-changes):\nEnables better handoffs between authors and reviewers and helps reviewers\nefficiently understand merge request suggestions\n\n* [Generate Tests in Merge\nRequests](https://docs.gitlab.com/ee/user/ai_features.html#generate-suggested-tests-in-merge-requests):\nAutomates repetitive tasks and helps you catch bugs early\n\n* [GitLab\nChat](https://docs.gitlab.com/ee/user/ai_features.html#gitlab-duo-chat):\nHelps you quickly identify useful information in large volumes of text, such\nas documentation\n\n* [Explain this\nCode](https://docs.gitlab.com/ee/user/ai_features.html#explain-selected-code-in-the-web-ui):\nAllows you to get up to speed quickly by explaining source code\n\n\nVisit our [GitLab Duo site](https://about.gitlab.com/gitlab-duo/) to learn\nmore about these features, GitLab's mission around AI, and our partnership\nwith Google.\n",[9,691,737],{"slug":2831,"featured":6,"template":693},"remediating-vulnerabilities-with-insights-and-ai","content:en-us:blog:remediating-vulnerabilities-with-insights-and-ai.yml","Remediating Vulnerabilities With Insights And Ai","en-us/blog/remediating-vulnerabilities-with-insights-and-ai.yml","en-us/blog/remediating-vulnerabilities-with-insights-and-ai",{"_path":2837,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2838,"content":2843,"config":2848,"_id":2850,"_type":14,"title":2851,"_source":16,"_file":2852,"_stem":2853,"_extension":19},"/en-us/blog/self-managed-support-for-code-suggestions",{"title":2839,"description":2840,"ogTitle":2839,"ogDescription":2840,"noIndex":6,"ogImage":868,"ogUrl":2841,"ogSiteName":706,"ogType":707,"canonicalUrls":2841,"schema":2842},"Self-managed support for 
Code Suggestions (Beta)","Self-managed support for Code Suggestions (Beta) is coming in GitLab 16.1.","https://about.gitlab.com/blog/self-managed-support-for-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Self-managed support for Code Suggestions (Beta)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Roger Woo\"}],\n        \"datePublished\": \"2023-06-15\"\n      }",{"title":2839,"description":2840,"authors":2844,"heroImage":868,"date":2845,"body":2846,"category":10,"tags":2847},[2181],"2023-06-15","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nGitLab [Code Suggestions (Beta)](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html) uses generative AI to suggest code while you’re developing. Since introduction in GitLab SaaS with 15.9, we've heard self-managed customers have wanted to use the feature. We're now bringing Code Suggestions to self-managed instances beginning with GitLab 16.1 (expected to be released June 22). Self-managed users working with VS Code or GitLab’s WebIDE can now receive code suggestions to help accelerate your development efforts while you type.\n\n## How does Code Suggestions work?\nA self-managed instance administrator must enable Code Suggestions on behalf of an organization's entire instance. Once enabled, users of that self-managed instance can authenticate their IDE to GitLab.com infrastructure using a secure handshake with the user's self-managed instance. 
As users type in their IDE, a context window containing relevant source code is securely transmitted to GitLab’s infrastructure, which will return an AI-generated code suggestion. GitLab does not have any visibility into a self-managed customer’s code other than what is sent to generate the Code Suggestion. GitLab does not persist any customer code sent in that context window nor train on customer data.\n\nIn this video, Senior Backend Engineer [Nikola Milojevic](https://gitlab.com/nmilojevic1) demonstrates how to set up and configure GitLab’s Code Suggestions for self-managed users on VS Code.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/-A4amG3E49Y\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n## How Code Suggestions uses data\nGitLab is mindful of privacy when we design our AI-powered features. With Code Suggestions, we securely transmit the data needed to generate a code suggestion, and we process all personal data in accordance with our [privacy statement](https://about.gitlab.com/privacy/). Our VS Code Workflow extension will securely transmit a minimal amount of data required to generate a code suggestion. GitLab does not receive any information outside of an IDE’s context. Read more about [Code Suggestions data usage](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#data-privacy).\n\n## Try Code Suggestions\nWe will be provisioning access on a customer-by-customer basis for this initial iteration of GitLab Code Suggestions (Beta) on self-managed instances. Please leave a note in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/415393) tagging your Customer Success Manager for help with enabling Code Suggestions once your instance is ready.\n\nCode Suggestions for self-managed instances will require GitLab 16.1. 
Customers may try Code Suggestions either via GitLab’s WebIDE or VS Code’s GitLab Workflow Extension (version [3.67+](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow&ssr=false#version-history)).\n\n## Iterating on AI/ML features\nThis is just the start of the ways we're infusing GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI-assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our \"[AI/ML in DevSecOps](https://about.gitlab.com/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[716,738,717,9],{"slug":2849,"featured":6,"template":693},"self-managed-support-for-code-suggestions","content:en-us:blog:self-managed-support-for-code-suggestions.yml","Self Managed Support For Code Suggestions","en-us/blog/self-managed-support-for-code-suggestions.yml","en-us/blog/self-managed-support-for-code-suggestions",{"_path":2855,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2856,"content":2862,"config":2869,"_id":2871,"_type":14,"title":2872,"_source":16,"_file":2873,"_stem":2874,"_extension":19},"/en-us/blog/six-reasons-cisco-learning-and-certifications-adopted-gitlab",{"title":2857,"description":2858,"ogTitle":2857,"ogDescription":2858,"noIndex":6,"ogImage":2859,"ogUrl":2860,"ogSiteName":706,"ogType":707,"canonicalUrls":2860,"schema":2861},"6 reasons Cisco Learning and Certifications adopted GitLab","Learn what Cisco Learning and Certifications's principal engineer and lead architect shared about the organization's use of GitLab at our DevSecOps World Tour in Chicago.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670302/Blog/Hero%20Images/lightbulb-book.png","https://about.gitlab.com/blog/six-reasons-cisco-learning-and-certifications-adopted-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"6 reasons Cisco Learning and Certifications adopted GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2023-09-05\",\n      }",{"title":2857,"description":2858,"authors":2863,"heroImage":2859,"date":2864,"body":2865,"category":2866,"tags":2867},[1379],"2023-09-05","\nAfter adopting the GitLab DevSecOps Platform, the learning and certification arm of tech giant Cisco Systems, Inc. 
saw its toolchain shrink and was able to offer its entire software development team a way to improve collaboration and accelerate production.\n\nThat’s part of the story that Hank Preston, principal engineer and lead architect with [Cisco Learning and Certifications](https://www.cisco.com/c/en/us/training-events/training-certifications.html), shared in an on-stage interview at the Chicago stop of [GitLab’s DevSecOps World Tour](https://about.gitlab.com/events/devsecops-world-tour/).\n\nPreston also spent part of his conversation with Nico Ochoa, senior major account executive at GitLab, talking about the rapid rise in interest in AI and how that is likely to affect everything from how they create software to how newer developers learn to code.\n\nHere are some of the opportunities Cisco Learning and Certifications is gaining from using GitLab:  \n \n## 1. Getting a handle on version control\nPreston said one of the biggest challenges he was looking to overcome by adopting a DevSecOps platform was resolving and [preventing conflicts](/topics/version-control/how-implement-version-control/) they had keeping track of code modifications and version updates, which can be easy to lose sight of when handling them manually. For example, the team had been storing templates and configuration pieces in different places and ended up having an “extreme inability” to know what had been changed and where it went, he said. Using a platform gives the team a way to more easily and clearly manage source and version controls. “It gave us a way to actually keep everything in one place, and understand the trajectory and history of changes,” he said.\n\n## 2. Ensuring accountability\n“Gaining accountability was a big one for us,” Preston told the audience. “We needed to know when something changed, who changed it, and who approved that change.” In an efficient, high-performing DevSecOps team, this kind of transparent accountability is critical. 
Both the development and operations teams have an equal stake in DevSecOps success. The problem is that it can be difficult to do when team structures and responsibilities are outdated or haven’t been updated in documentation. Clearly distributing functions and noting who needs to do a task, as well as when and who accomplished a task, makes this work. “When I taught a bunch of network engineers how to do merge requests and branching, it became less of a challenge for us,” Preston said. \n\n## 3. Consolidating their toolchain\n“[Reducing our toolchain](/blog/too-many-toolchains-a-devops-platform-migration-is-the-answer/) … that was one of the reasons we selected GitLab. I didn't want to have to have a source control tool and a CI tool and an issues tool,” Preston said.\n\nCisco Learning and Certifications still uses Jira — a decision Preston said was not his own. “I'm looking forward to potentially getting rid of [Jira] in the future. But not having to manage a bunch of tools was a big value for us because then there are less things to integrate, less things to upgrade, and less things I have to train people on how to use,” he added.\n\n## 4. Boosting collaboration \nWhen different team members want to [collaborate](/blog/5-ways-collaboration-boosts-productivity-and-your-career/) on a project, sharing responsibilities and information, it’s critical for them to be able to all work in the same place. Preston explained that having a single, end-to-end platform that everyone works in means teammates don’t have to go looking for something across multiple tools. “Now everybody knows where to go to find the code and the templates they need. And the automation pieces are right there,” he added. “We don’t need to go looking for things or logging into, say, Mark's particular OneDrive folder to find a piece we need.” \n\n## 5. Having a single source of truth\n“I'm a big proponent of the concept of a single source of truth,” said Preston. 
“I tell people that the network shouldn't be the source of truth. That's the implementation of what our source of truth is.”\n\nCisco Learning and Certifications uses GitLab as the source of truth for a lot of its configuration templates, automation scripts and jobs, and infrastructure as code, according to Preston. “So whether it’s our data centers or sites, all the data should be aggregated and stored according to a single reference point. We have a plan and a way of doing things and everyone can see what that is,” he said.\n\n## 6. Speeding up production\nPreston also noted he’s using the platform to gain insight into and improve the time it takes to move a software project from ideation to development and into deployment. “One of the metrics I'm trying to use with our team now is how long it takes us to get something from an idea to working software,” he said. “If I’m trying to get an update into production or if I'm trying to fix something on the infrastructure stack, how long does it take us to get to where we feel comfortable pushing it into production? That's a metric we're watching. I don't want it to take 18 months to go from an idea to release. I needed that to be much faster.”\n\n## How AI could affect software creation, developer learning\nDuring his on-stage talk, Preston also turned his attention to artificial intelligence (AI), speaking about the growing call for the use of [AI in DevSecOps](/blog/ai-assisted-code-suggestions/), how it may be regulated, and how it could affect software developers.\n\n“AI in DevOps has changed dramatically just over the last three months or so,” he said. “The [AI transition](/blog/extending-code-suggestions/) has been rapid. I can't even pretend to guess what the next two years of it will look like, let alone the next five years. 
I think it’s an interesting challenge, and, well, to be honest, it makes me personally nervous.”\n\nHe added that he’s concerned about the issue of intellectual property and who owns the code that AI generates. Will the software built by AI be owned by the company the code was built for, or by the person or company that created the AI?\n\n“The intellectual property question is a big one. It’s something that has to be figured out,” he said.\n\nAnother thing that nags at Preston is how using AI in DevSecOps will affect the way people learn to develop code if it’s automatically being generated for them. Will people still become coding wizards if they don’t actually have to do it themselves?\n\n“I think about how this will affect the path of software developers and engineers who are coming onto the team,” he said. “How will they become our coding experts in the future? I love the code generation process but it also makes me worried. I wonder — and part of me thinks I'm just being the grouchy old man — how do we teach the new engineers how to make a connection to MySQL if the minute they type ‘M-Y-S-Q-L,’ the code comes up? That’s a scary spot. I know there's a lot of value to the grunt work being thinned out, but there’s value that comes with the doing and learning.”\n\nAnswering a question about whether Cisco allows developers to use AI-generated code, Preston replied, “Generally, no.” He said executives need to work through intellectual property questions first, but AI is a tool they plan to implement.\n\nPreston also was asked about maintaining standards when using AI to generate code. “We're looking for AI solutions we can leverage that will meet the coding requirements we have across the board,” he said. “But I think the question on standards is important. It's going to be the same way we handle it when people write code. 
Code will all have to be checked against set standards, whether it’s written by people or AI.”\n\n_Preston’s team within Cisco Learning and Certifications is responsible for creating and maintaining the software needed for the digital learning platform that offers instant user access to training information, classes, course materials, and exam preparation resources. Cisco employees use the platform to gain certifications, like CCNA, CCNP, and CCIE._\n\n_GitLab’s DevSecOps World Tour was designed to enable everyone — from technology champions to executives and software development team members — to gather and learn about the ideas and technologies driving the DevSecOps transformation. Learn more about our [DevSecOps World Tour](https://about.gitlab.com/events/devsecops-world-tour/)._\n","customer-stories",[716,2868,9],"customers",{"slug":2870,"featured":6,"template":693},"six-reasons-cisco-learning-and-certifications-adopted-gitlab","content:en-us:blog:six-reasons-cisco-learning-and-certifications-adopted-gitlab.yml","Six Reasons Cisco Learning And Certifications Adopted Gitlab","en-us/blog/six-reasons-cisco-learning-and-certifications-adopted-gitlab.yml","en-us/blog/six-reasons-cisco-learning-and-certifications-adopted-gitlab",{"_path":2876,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2877,"content":2883,"config":2889,"_id":2891,"_type":14,"title":2892,"_source":16,"_file":2893,"_stem":2894,"_extension":19},"/en-us/blog/solving-complex-challenges-with-gitlab-duo-workflow",{"title":2878,"description":2879,"ogTitle":2878,"ogDescription":2879,"noIndex":6,"ogImage":2880,"ogUrl":2881,"ogSiteName":706,"ogType":707,"canonicalUrls":2881,"schema":2882},"Solving complex challenges with GitLab Duo Workflow","Learn how a member of the GitLab Customer Success Management team uses agentic AI for real-world problem-solving, including addressing Helm chart limits in the package 
registry.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097663/Blog/Hero%20Images/Blog/Hero%20Images/Workflow%201800x945_2gQoQIbY9NvjLFpXtsxtXy_1750097663612.png","https://about.gitlab.com/blog/solving-complex-challenges-with-gitlab-duo-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Solving complex challenges with GitLab Duo Workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Johannes Bauer\"}],\n        \"datePublished\": \"2025-04-23\"\n      }",{"title":2878,"description":2879,"authors":2884,"heroImage":2880,"date":2886,"body":2887,"category":10,"tags":2888},[2885],"Johannes Bauer","2025-04-23","As a Customer Success Manager (CSM), I often face complex challenges that require quick, efficient, and innovative solutions. Recently, one of my enterprise customers encountered an issue with Helm chart limits in the GitLab package registry. At GitLab, we are committed to dogfooding our DevSecOps platform, so I turned to [GitLab Duo Workflow](https://about.gitlab.com/gitlab-duo/agent-platform/), our secure, agentic AI offering that is currently in private beta.\n\n[Agentic AI](https://about.gitlab.com/topics/agentic-ai/) represents the next evolution of generative AI, designed to go beyond single-task automation, such as code completion or test generation. While generative AI focuses on creating content based on specific prompts, agentic AI introduces a level of autonomy and context awareness, enabling it to perform complex, multi-step workflows. This advanced AI operates as a capable assistant that understands the broader goals of a task, plans intermediate steps, and executes them efficiently.\n\nAgentic AI, more specifically Duo Workflow, has transformed how I approach problem-solving as a CSM. 
Here's how I used Duo Workflow to resolve a real-world challenge and deliver value to my customer.\n\n## Addressing Helm chart limits in the GitLab package registry\n\nThe customer was dealing with a hardcoded limit for Helm charts in the GitLab package registry, which wasn’t flexible enough for their needs. They required a solution that allowed administrators to configure limits dynamically through the Admin UI. Addressing this issue was critical to maintaining their workflow efficiency and satisfaction.\n\n### How I leveraged GitLab Duo Workflow\n\nTo craft an effective solution, I leveraged Duo Workflow, a game-changer for brainstorming and strategizing with agentic AI.\n\nMy prompt was:\n\n> I am working on the following issue [insert issue number] and would like to resolve it. We have developed the following strategy:\n>\n> 1. Implement the feature as an application setting in the Admin UI, rather than using a configuration file approach.\n> 2. Utilize the existing package_registry jsonb column in the application settings table. Add a new key to this JSON structure to store the Helm chart limit.\n> 3. Create a new section in the Admin UI (/admin/application_settings/ci_cd#js-package-settings) for “specific limits for each format”. This will require some design work to determine how to best display the setting.\n> 4. Add a number field in this new section to allow administrators to set the Helm chart limit.\n> 5. Update the backend logic in the Helm package registry (app/finders/packages/helm/packages_finder.rb) to use this new application setting instead of the hardcoded limit.\n>\n>Please organize your updates into Frontend and Backend changes. Additionally, could you check if there are any specific requirements when adding a new column to the application settings table?\n\nThis prompt laid out a clear plan, enabling Duo Workflow to provide structured recommendations and actionable insights. The strategy developed was straightforward yet impactful. 
The changes were structured into frontend and backend tasks, ensuring clear separation and collaboration.\n\n![Result of GitLab Duo Workflow prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097675/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097674930.png)\n\n![Result of GitLab Duo Workflow prompt - 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097675/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097674931.png)\n\n## Results\n\nAfter implementing the solution together with Duo Workflow, I submitted a merge request, which was successfully merged and became available in GitLab 17.10!\n\nThe result? A flexible, user-friendly way for administrators to configure Helm chart limits, significantly enhancing the customer’s experience and aligning with their operational goals.\n\n![GitLab package registry page](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097675/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097674933.png)\n\nThe customer is thrilled to have this functionality available as it provides them with the flexibility and control they need to streamline their workflows. What’s even more exciting is that it only took me **hours** to complete the implementation, compared to the **days** I initially estimated, thanks to Duo Workflow. As someone without prior development experience in GitLab, this was a huge win!\n\nThis experience taught me the immense potential of AI tools like [GitLab Duo](https://about.gitlab.com/gitlab-duo/). By combining human expertise with AI-driven insights, we can address even the most complex challenges effectively. As a CSM, leveraging such tools streamlines problem-solving and strengthens trust and partnership with customers.\n\n## Delivering value\n\nGitLab Duo Workflow empowered me to tackle a complex technical issue head-on and find a solution that exceeded customer expectations. 
If you'd like to try Duo Workflow in your development environment, please [sign up for our private beta waitlist](https://about.gitlab.com/gitlab-duo/agent-platform/).\n\n## Learn more\n- [GitLab Duo Workflow: Enterprise visibility and control for agentic AI](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/)\n- [Use GitLab Duo Workflow to improve application quality assurance](https://about.gitlab.com/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance/)\n- [GitLab Duo Workflow documentation](https://docs.gitlab.com/user/duo_workflow/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [GitLab Duo](https://about.gitlab.com/gitlab-duo/)",[9,496,738],{"slug":2890,"featured":91,"template":693},"solving-complex-challenges-with-gitlab-duo-workflow","content:en-us:blog:solving-complex-challenges-with-gitlab-duo-workflow.yml","Solving Complex Challenges With Gitlab Duo Workflow","en-us/blog/solving-complex-challenges-with-gitlab-duo-workflow.yml","en-us/blog/solving-complex-challenges-with-gitlab-duo-workflow",{"_path":2896,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2897,"content":2903,"config":2909,"_id":2911,"_type":14,"title":2912,"_source":16,"_file":2913,"_stem":2914,"_extension":19},"/en-us/blog/southwest-looking-to-help-developers-take-flight",{"title":2898,"description":2899,"ogTitle":2898,"ogDescription":2899,"noIndex":6,"ogImage":2900,"ogUrl":2901,"ogSiteName":706,"ogType":707,"canonicalUrls":2901,"schema":2902},"Southwest looking to help developers take flight","Learn how the airline's DevOps teams are dramatically increasing their ability to detect and resolve problems with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665272/Blog/Hero%20Images/AdobeStock_380312133.jpg","https://about.gitlab.com/blog/southwest-looking-to-help-developers-take-flight","\n                        {\n        \"@context\": \"https://schema.org\",\n        
"@type\": \"Article\",\n        \"headline\": \"Southwest looking to help developers take flight\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2024-01-30\"\n      }",{"title":2898,"description":2899,"authors":2904,"heroImage":2900,"date":2905,"body":2906,"category":2866,"tags":2907},[1379],"2024-01-30","Southwest Airlines Co. is working to make developers’ jobs easier.\n\nIT leaders at the world's largest low-cost carrier are moving to eliminate time-consuming and repetitive tasks from developers’ workflows, freeing their time and increasing their ability to focus on bigger projects.\n\n“The way we do that is by getting things out of their way,” said Jim Dayton, vice president and CISO at Southwest Airlines. “I am a firm believer that people go into software development because they love the creativity of it. They love the ability to solve problems. What we have to do is get out of their way and get the things that are blocking them out of their way.”\n\nPart of how Dayton is making that happen is by using GitLab’s platform.\n\nDayton talked about Southwest’s efforts to take care of their developers, and promote the work they’re doing, during an on-stage interview at the Dallas stop of [GitLab’s DevSecOps World Tour](https://about.gitlab.com/events/devsecops-world-tour/). He also spent part of his conversation with Reshmi Krishna, director of Enterprise Solutions Architecture at GitLab, discussing what benefits he hopes artificial intelligence capabilities will be able to offer his teams.\n\nThe Southwest exec, who said they’re moving toward a DevOps approach to application development, added that they’re providing developers with more self-service capabilities and knowledge management processes. “We want developers to be able to quickly look up a problem, look up a solution, and reduce context switching,” he said. 
“We need to be able to look at what we are asking them to do and what's preventing them from being able to be productive.”\n\nDayton noted that Southwest, which established a relationship with GitLab in 2019, is focused on creating consistency for its software development processes. In part, that means moving code into a shared GitLab repository. By knowing where all of their code resides, teams will be able to more easily evaluate metrics, and begin to look at creating efficiencies by reusing code. \n\n“We’re also in the process of getting our enterprise pipelines finalized and we’re ready to start migrating teams onto them,” said Dayton. “We're collaborating heavily with a lot of different application development teams to understand what they need in the pipelines that we're building and we’re getting ready to start migrating teams onto them. I think we'll be getting pretty close by the end of the year.”\n\n### The promise of AI\n\nUsing artificial intelligence is one of the ways to enable developers to focus on bigger, more innovative tasks, Dayton explained.\n\nGenerative AI, whether in the form of vulnerability explainers, code suggestions, or code completion, has the ability to dramatically affect workflows across the entire software development lifecycle. Leveraging AI tools built into a platform can increase security and decrease time spent on code reviews and application development.\n\nDayton is looking forward to being able to use AI features to speed and ease development and deployment.\n\n“We want to get the mundane and the bureaucratic out of their way as much as possible,” Dayton said, adding that while there’s a lot of hype around AI, there’s also a lot of promise. “Using AI could do that. I think a great example will be when it can provide a solution to a vulnerability that was just identified or when it can tell us what a piece of code is doing. What is it integrating with? What data is it accessing and why? 
Tell me in plain English, for example, that this particular set of coding has been responsible for 20% of the incidents in this application over the past year. That’s where I think AI can help.”\n\nDayton noted that he doesn’t believe AI will replace developers. Instead, it should make their jobs easier. Another way AI can help is by connecting developers in a time when many are working remotely post-COVID.\n\n“One of the cool things that's in [GitLab’s] roadmap is Suggested Reviewers,” he said. “Getting help with code reviews used to involve yelling across the room or over a cube wall, ‘Hey, can someone look at my code?’ That’s not so easy now. AI can suggest someone who's actually worked in that code before or who has resolved incidents in that code and does that sort of thing. How much value is that going to add to the review process? I think the more automation we can put in, the less manual steps or wait states there will be.”\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/UnUfp7pKnEQ?si=qcX2Qm3zpgQOV4xy\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n*Southwest Airlines is a nearly $24 billion company based in Dallas, Texas. With 72,000 employees, it flies to 120 destinations, making 4,000 flights per day.  
Southwest flies more domestic passengers than any other airline.\nRead more GitLab customer stories on our [customers page](https://about.gitlab.com/customers/).*\n",[917,2908,9,2868],"DevOps platform",{"slug":2910,"featured":6,"template":693},"southwest-looking-to-help-developers-take-flight","content:en-us:blog:southwest-looking-to-help-developers-take-flight.yml","Southwest Looking To Help Developers Take Flight","en-us/blog/southwest-looking-to-help-developers-take-flight.yml","en-us/blog/southwest-looking-to-help-developers-take-flight",{"_path":2916,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2917,"content":2920,"config":2925,"_id":2927,"_type":14,"title":2928,"_source":16,"_file":2929,"_stem":2930,"_extension":19},"/en-us/blog/speed-meets-governance-model-selection-comes-to-gitlab-duo",{"noIndex":6,"title":2918,"description":2919},"Speed meets governance: Model Selection comes to GitLab Duo","Duo Model Selection provides LLM control for each GitLab Duo feature, enabling organizations to adopt AI while meeting strict governance and compliance standards.",{"title":2918,"description":2919,"authors":2921,"heroImage":929,"date":2922,"body":2923,"category":10,"tags":2924},[1300],"2025-06-25","New AI models are released almost daily, each with unique capabilities, performance characteristics, and compliance implications. At GitLab, we're committed to delivering cutting-edge AI capabilities by [continuously integrating the latest and highest-performing models as they become available](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale/). 
However, we know this fast pace can create complex challenges for enterprise organizations whose model usage is subject to strict governance, compliance, and security standards.\n\nMeet [GitLab Duo Model Selection](https://docs.gitlab.com/user/gitlab_duo/model_selection/), a powerful new capability that gives teams control over the large language models ([LLMs](https://about.gitlab.com/blog/what-is-a-large-language-model-llm/)) used in your organization. Available in private beta in the newly released [GitLab 18.1](https://about.gitlab.com/releases/2025/06/19/gitlab-18-1-released/) to all GitLab.com customers using Duo Enterprise, Duo Model Selection makes it easier to maintain governance, compliance, and security standards while helping accelerate innovation with agentic and generative AI. With Duo Model Selection, organizations can adopt GitLab Duo faster by selecting models from their pre-approved vendor list, versus the GitLab default model.\n\n## The benefits of GitLab Duo Model Selection\n\nDuo Model Selection gives GitLab.com namespace owners control over which AI models teams can use across different GitLab Duo features, though those without specialized requirements are recommended to use the GitLab default model. With Duo Model Selection, you can:\n\n* **Configure models at the organization level:** Set AI model preferences that apply across your organization’s entire namespace, ensuring consistent governance and compliance standards. 
Namespace owners can select models approved by their organization from GitLab's validated model catalog.\n\n* **Control models per GitLab Duo feature:** Different GitLab Duo features can use different models based on your specific needs.\n\nWatch Duo Model Selection in action:\n\n\u003Cdiv style=\"padding:62.21% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1094452473?autoplay=1&amp;badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Duo Model Selection Demo\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Join the Duo Model Selection private beta\n\nReady to take control of your AI governance? Duo Model Selection is currently in private beta for all GitLab.com customers using Duo Enterprise. To join the private beta, reach out to your GitLab account team. 
If you don’t have Duo, [sign up for a GitLab Duo trial](https://about.gitlab.com/gitlab-duo/#free-trial) today!\n> Find out everything that's new and exciting, including agentic AI capabilities, in GitLab 18 with our [on-demand launch event](https://about.gitlab.com/eighteen/).",[9,738],{"featured":91,"template":693,"slug":2926},"speed-meets-governance-model-selection-comes-to-gitlab-duo","content:en-us:blog:speed-meets-governance-model-selection-comes-to-gitlab-duo.yml","Speed Meets Governance Model Selection Comes To Gitlab Duo","en-us/blog/speed-meets-governance-model-selection-comes-to-gitlab-duo.yml","en-us/blog/speed-meets-governance-model-selection-comes-to-gitlab-duo",{"_path":2932,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2933,"content":2938,"config":2942,"_id":2944,"_type":14,"title":2945,"_source":16,"_file":2946,"_stem":2947,"_extension":19},"/en-us/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo",{"title":2934,"description":2935,"ogTitle":2934,"ogDescription":2935,"noIndex":6,"ogImage":847,"ogUrl":2936,"ogSiteName":706,"ogType":707,"canonicalUrls":2936,"schema":2937},"Streamline DevSecOps engineering workflows with GitLab Duo","Learn all the ways GitLab Duo's AI capabilities can improve the efficiency of development workflows. Includes in-depth tutorials and demos.","https://about.gitlab.com/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Streamline DevSecOps engineering workflows with GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2024-12-05\"\n      }",{"title":2934,"description":2935,"authors":2939,"heroImage":847,"date":2282,"body":2940,"category":10,"tags":2941},[1722],"It's 9 a.m. somewhere, and a DevOps engineer is starting their day. 
They check their [GitLab todo list](https://docs.gitlab.com/ee/user/todos.html) to see any mentions or tasks assigned to them, collaborating with other stakeholders in their organization. These tasks can include:\n\n- managing infrastructure\n- maintaining the configuration of resources\n- maintaining CI/CD pipelines\n- automating processes for efficiency\n- maintaining monitoring and alerting systems\n- ensuring applications are securely built and deployed\n- modernizing applications with containerization\n\nTo carry out these tasks, DevOps engineers spend a lot of time reading documentation, writing configuration files, and searching for help in forums, issues boards, and blogs. Time is spent studying and understanding concepts, and how tools and technologies work. When they don't work as expected, a lot more time is spent investigating why. New tools are released regularly to solve niche or existing problems differently, which introduces more things to learn and maintain context for.\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/), our AI-powered suite of capabilities, fits into the workflow of DevSecOps engineers, enabling them to reduce time spent solving problems while increasing their efficiency.\n\nLet's explore how GitLab Duo helps streamline workflows.\n\n## Collaboration and communication\n\nDiscussions or requests for code reviews require spending time reading comments from everyone and carefully reviewing the work shared. GitLab Duo capabilities like Discussion Summary, Code Review Summary, and Merge Request Summary increase the effectiveness of collaboration by reducing the time required to get caught up on activities and comments, with more time spent getting the actual work done.\n\n### Merge Request Summary  \n\nWriting a detailed and clear summary of the change a merge request introduces is crucial for every stakeholder to understand what, why, and how a change was made. 
It's more difficult than it sounds to effectively articulate every change made, especially in a large merge request. [Merge Request Summary](https://docs.gitlab.com/ee/user/project/merge_requests/duo_in_merge_requests.html#generate-a-description-by-summarizing-code-changes) analyzes the change's diff and provides a detailed summary of the changes made.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4muvSFuWWL4?si=1i2pkyqXZGn2dSbd\" title=\"GitLab Duo Chat is now aware of Merge Requests\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Discussion Summary\n\nImagine getting pulled into an issue with more than 100 comments and a lengthy description, with different perspectives and opinions shared. GitLab Duo [Discussion Summary](https://docs.gitlab.com/ee/user/discussions/index.html#summarize-issue-discussions-with-duo-chat) summarizes all the conversations in the issue and identifies tasks that need to be done, reducing time spent. \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/IcdxLfTIUgc?si=WXlINow3pLoKHBVM\" title=\"GitLab Duo Discussion Summary\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Code Review Summary\n\nA merge request has been assigned to a DevOps engineer for review in preparation for deployment, and they have spent time reviewing several parts of the change with multiple comments and suggestions. When [submitting a review](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/index.html#submit-a-review), a text box is presented to summarize the review, which often requires taking a pause and articulating the review. 
With [Code Review Summary](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/index.html#submit-a-review), they get a concise summary automatically drafted leading to efficiency.\n\n## Manage infrastructure changes\n\nPart of a DevOps engineer's workflow is managing infrastructure changes. Infrastructure as code ([IaC](https://docs.gitlab.com/ee/user/infrastructure/iac/)) revolutionized this process, allowing for documentation, consistency, faster recovery, accountability, and collaboration. A challenge with IaC is understanding the requirements and syntax of the chosen tool and provider where the infrastructure will be created. A lot of time is then spent reviewing documentation and tweaking configuration files until they meet expectations. \n\nWith GitLab Duo [Code Explanation](https://docs.gitlab.com/ee/user/gitlab_duo/index.html#code-explanation) and [Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html), you can prompt GitLab Duo to create configuration files in your tool of choice and learn about the syntax of those tools. With Code Suggestions, you can either leverage [code generation](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html#code-generation), where you prompt GitLab Duo to generate the configuration, or code completion, which provides suggestions as you type while maintaining the context of your existing configurations.\n\nAs of the time this article was published, Terraform is [supported by default](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html#supported-languages) with the right extensions for your IDEs. 
Other technologies can be supported with [additional language support configuration](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html#add-support-for-more-languages) for the [GitLab Workflow extension](https://docs.gitlab.com/ee/editor_extensions/visual_studio_code/index.html).\n\nWhere a technology is not officially supported, [GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html) is the powerful AI assistant that can help generate, explain, clarify, and troubleshoot your configuration, while maintaining context from selected text or opened files. Here are two demos where GitLab Duo helped create IaC with Terraform and AWS CloudFormation.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/saa2JJ57UaQ?si=Bu9jyQWwuSUcw8vr\" title=\"Manage your Infrastructure with Terraform and AI using GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/KSLk2twXqiI?si=QDdERjbM0f7X2p23\" title=\"Deploying AWS Lambda function using AWS Cloudformation with help from GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Configuration management\n\nOnce your infrastructure is up, GitLab Duo Chat can also help create configuration files and refactor existing ones. These can be Ansible configurations for infrastructure or cloud-native configurations using Docker, Kubernetes, or Helm resource files. 
In the videos below, I demonstrate how GitLab Duo helps with Ansible, containerization, and application deployment to Kubernetes.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/t6ZCq_jkBwY?si=awCUdu1wCgOO21XR\" title=\"Configuring your Infrastructure with Ansible & GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/KSLk2twXqiI?si=QDdERjbM0f7X2p23\" title=\"Containerizing your application with GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uroSxvMFqPU?si=GMNC7f2b7i_cjn6F\" title=\"Deploying your application to Kubernetes with Help from GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/9yGDM00RlUA?si=kE5JZD_OEFcxeR7E\" title=\"Deploying to Kubernetes using Helm with help from GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Test, test, test\n\nWriting tests is an important part of building secure software, but it can be a chore and often becomes an afterthought. 
You can leverage the power of GitLab Duo to [generate tests for your code](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#vulnerability-resolution) by highlighting your code and typing the `/tests` in the Chat panel of your IDE.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/zWhwuixUkYU?si=wI93j90PIiUMyGcV\" title=\"GitLab Duo Test Generation\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### CI/CD pipeline troubleshooting\n\nAutomation is an essential part of the DevOps engineer's workflow, and Continuous Integration/Deployment ([CI/CD](https://about.gitlab.com/topics/ci-cd/)) is central to this. You can trigger CI jobs on code push, merge, or on schedule. But, when jobs fail, you spend a lot of time reading through the logs to identify why, and for cryptic errors, it can take more time to figure out. [GitLab Duo Root Cause Analysis](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/) analyzes your failed job log and errors, and then recommends possible fixes. This reduces the time spent investigating the errors and finding a fix.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Sa0UBpMqXgs?si=IyR-skz9wJMBSicE\" title=\"GitLab Duo Root Cause Analysis\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Building secure applications\n\nPart of software development includes discovering vulnerabilities, either in the application or its dependencies. Some vulnerabilities are easy to fix, while others require creating a milestone with planning. 
GitLab Duo [Vulnerability Explanation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#explaining-a-vulnerability) and [Vulnerability Resolution](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#vulnerability-resolution) reduce the time spent researching and fixing vulnerabilities. Vulnerability Explanation explains why a vulnerability is happening, its impact, and how to fix it, helping the DevOps engineer to upskill. Vulnerability Resolution takes it further – instead of just suggesting a fix, it creates a merge request with a fix for the vulnerability for you to review. \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/MMVFvGrmMzw?si=Fxc4SeOkCBKwUk_k\" title=\"GitLab Duo Vulnerability Explanation\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/VJmsw_C125E?si=XT3Qz5SsX-ISfCyq\" title=\"GitLab Duo Vulnerability resolution\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## More work done with less stress\n\nWith GitLab Duo, DevOps engineers can do more work deploying and maintaining secure applications, while acquiring more skills with the detailed responses from GitLab Duo Chat.\n\n> [Sign up for a free trial of GitLab Duo](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/) to get started today!",[9,109,716,496,737,738],{"slug":2943,"featured":91,"template":693},"streamline-devsecops-engineering-workflows-with-gitlab-duo","content:en-us:blog:streamline-devsecops-engineering-workflows-with-gitlab-duo.yml","Streamline Devsecops Engineering Workflows With Gitlab 
Duo","en-us/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo.yml","en-us/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo",{"_path":2949,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2950,"content":2955,"config":2961,"_id":2963,"_type":14,"title":2964,"_source":16,"_file":2965,"_stem":2966,"_extension":19},"/en-us/blog/summarize-issues",{"title":2951,"description":2952,"ogTitle":2951,"ogDescription":2952,"noIndex":6,"ogImage":868,"ogUrl":2953,"ogSiteName":706,"ogType":707,"canonicalUrls":2953,"schema":2954},"ML experiment: Summarizing issue comments","Learn how GitLab is experimenting with ML-powered issue comment summarization in this fifth installment of our ongoing AI/ML in DevSecOps series.","https://about.gitlab.com/blog/summarize-issues","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Summarizing issue comments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Melissa Ushakov\"},{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2023-04-13\",\n      }",{"title":2951,"description":2952,"authors":2956,"heroImage":868,"date":2958,"body":2959,"category":10,"tags":2960},[2957,1455],"Melissa Ushakov","2023-04-13","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\n[GitLab issues](https://docs.gitlab.com/ee/user/project/issues/) are essential for team collaboration and serve as the source of truth for teams to align on the problem definition and scope of work for ongoing efforts. 
As teams collaborate on issues to refine them, the volume of comments grows. For issues with many comments, it can be challenging to understand the status of work at a glance. You may need to spend significant time reading comments to get an overview of the decisions made so far and to understand, for example, if there are any blockers.\n\nWith the recent advancements in AI and natural language processing, it's now possible for AI models to summarize text like that found in issue comments. We believe that this technology will help teams use their time more efficiently and help prevent losing track of important information within issue comments.\n\nLarge language models (LLMs) power generative AI solutions by using deep learning algorithms to analyze vast amounts of natural language data. These models are trained on massive datasets to develop an understanding of language patterns and context. Once trained, the models can generate new text that mimics human language.\n\nIn a rapid prototype, our own [Alexandru Croitor](https://gitlab.com/acroitor), Senior Backend Engineer, and [Nicolas Dunlar](https://gitlab.com/nicolasdular), Senior Frontend Engineer for our [Plan stage](https://handbook.gitlab.com/handbook/product/categories/#plan-stage), leverage generative AI LLMs to power comment summarization within [GitLab's issues](https://docs.gitlab.com/ee/user/project/issues/).\n\n![Prototype UX for comment summary](https://about.gitlab.com/images/blogimages/Issue_comment_summary_blog.gif){: .shadow}\n\nAbove, you can see an example of triggering the summarization of issue comments. 
Watch the full demo below.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/GMr3eHwbYAI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nOur experiment takes an individual's natural language comments, inferences them against a generative AI LLM, and through novel prompt engineering (the task of guiding LLM output through instructions), creates a summary of long comment threads. Part of our engineering exploration is examining how to chunk extremely long comment threads into parsable bits an LLM can succinctly and accurately summarize.\n\n## Iterating on AI/ML features\nWhile just an experiment today, we are iterating on how to effectively bring features like this to our customers. We're starting with summarization of issue comments, and are working to optimize prompts to provide more meaningful summaries. We are also investigating bringing this functionality to other objects like epics and merge requests.\n\nThis experiment is just the start of many ways we’re looking to infuse GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI Assisted features. We’ll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. 
As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[917,738,717,9],{"slug":2962,"featured":6,"template":693},"summarize-issues","content:en-us:blog:summarize-issues.yml","Summarize Issues","en-us/blog/summarize-issues.yml","en-us/blog/summarize-issues",{"_path":2968,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2969,"content":2974,"config":2979,"_id":2981,"_type":14,"title":2982,"_source":16,"_file":2983,"_stem":2984,"_extension":19},"/en-us/blog/summarize-my-merge-request-review",{"title":2970,"description":2971,"ogTitle":2970,"ogDescription":2971,"noIndex":6,"ogImage":868,"ogUrl":2972,"ogSiteName":706,"ogType":707,"canonicalUrls":2972,"schema":2973},"ML experiment: Summarize my merge request review","Learn how GitLab is experimenting with ML-powered merge request review summaries in this latest installment of our ongoing 'AI/ML in DevSecOps' series.","https://about.gitlab.com/blog/summarize-my-merge-request-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Summarize my merge request review\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kai Armstrong\"}],\n        \"datePublished\": \"2023-05-18\",\n      }",{"title":2970,"description":2971,"authors":2975,"heroImage":868,"date":2976,"body":2977,"category":10,"tags":2978},[1493],"2023-05-18","\n\n\u003Ci>This blog is the latest post in an ongoing series about GitLab’s journey to \u003Ca href=\"/blog/ai-ml-in-devsecops-series/\">build and integrate AI/ML into our DevSecOps platform\u003C/a>. The first blog post can be found \u003Ca href=\"/blog/what-the-ml-ai/\">here\u003C/a>. 
Throughout the series, we’ll feature blogs from our product, engineering, and UX teams to showcase how we’re infusing AI/ML into GitLab.\u003C/i>\n\nDuring the course of reviewing a merge request, you may sometimes leave many comments. Those comments may have specific information about things that need to be changed, or context for why you're leaving feedback on the proposed changes. If you've left a lot of comments, it might be hard to remember everything you've said and what the author should look at to resolve your feedback.\n\nIn a rapid prototype, [Stanislav Lashmanov](https://gitlab.com/slashmanov), Senior Frontend Engineeer for our [Code Review Group](https://handbook.gitlab.com/handbook/product/categories/#code-review-group), used AI/ML to summarize your merge request review when [submitting your review](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/#submit-a-review). He developed a new AI action that provides a summary, and allows you to edit or revise prior to submitting the review:\n\n![Summarize my merge request review via AI](https://about.gitlab.com/images/blogimages/summarize-my-merge-request-review-ai.gif){: .shadow}\n\nProviding authors with these review summaries allows them to quickly understand the feedback and scope of revisions required without the need to process the entire review. This helps to speed up the cycle time for teams as they work through review rounds in merge requests.\n\n## Iterating on AI/ML features\n\nWhile just an experiment today, we are iterating on how to effectively bring features like this to our customers. We'll continue to refine the type of review feedback we provide, and then look at how we can better integrate these summaries in to the review cycle. 
You can see some of our [design efforts](https://gitlab.com/gitlab-org/gitlab/-/issues/408307) we've been exploring with the [summarizing merge request changes](/blog/merge-request-changes-summary-ai/) feature to get an idea of our possible direction.\n\nThis experiment is just the start of the ways we're infusing GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI-assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[716,738,717,9],{"slug":2980,"featured":6,"template":693},"summarize-my-merge-request-review","content:en-us:blog:summarize-my-merge-request-review.yml","Summarize My Merge Request Review","en-us/blog/summarize-my-merge-request-review.yml","en-us/blog/summarize-my-merge-request-review",{"_path":2986,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":2987,"content":2993,"config":2998,"_id":3000,"_type":14,"title":3001,"_source":16,"_file":3002,"_stem":3003,"_extension":19},"/en-us/blog/supercharge-productivity-with-gitlab-duo",{"title":2988,"description":2989,"ogTitle":2988,"ogDescription":2989,"noIndex":6,"ogImage":2990,"ogUrl":2991,"ogSiteName":706,"ogType":707,"canonicalUrls":2991,"schema":2992},"Supercharge productivity with generative AI and GitLab Duo","Learn how the GitLab DevSecOps platform leverages generative AI and large language models to enable organizations to boost productivity and efficiency.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674838/Blog/Hero%20Images/duo-blog-post.png","https://about.gitlab.com/blog/supercharge-productivity-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Supercharge productivity with generative AI and GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2023-07-20\",\n      }",{"title":2988,"description":2989,"authors":2994,"heroImage":2990,"date":2995,"body":2996,"category":10,"tags":2997},[1722],"2023-07-20","\nIn GitLab's [2023 Global DevSecOps Report](https://about.gitlab.com/developer-survey/), developer productivity and operational efficiency were top of mind for most respondents and ranked as the two top benefits organizations strive for when adopting DevSecOps methodologies. 
At the same time, artificial intelligence (AI) and machine learning (ML) have become highly relevant to research and development teams in recent years and have gained increased importance in the software development process of many companies. Across the software development lifecycle, generative AI is enabling faster development cycles, less time spent on code reviews, and more secure software development practices.\n\nAt GitLab, we’re using generative AI and ML to deliver productivity and efficiency gains to DevSecOps teams throughout the software development lifecycle. In this blog post, we will look at how the suite of [GitLab Duo](https://about.gitlab.com/gitlab-duo/) AI capabilities boost productivity. First, let's understand the underlying technology: generative AI.\n\n## What is generative AI?\nGenerative AI is a branch of AI capable of generating new content. The generated content is created by using patterns learned from examples or input data during the training process. One of the most popular architectures used to learn from data is a [transformer model](https://blogs.nvidia.com/blog/what-is-a-transformer-model/). Given the success of transformer models in natural language processing (NLP) and their flexibility in being adapted or fine-tuned to different and more specific tasks, they have been called \"foundation models,\" a term coined by Stanford researchers in an [August 2021 research paper](https://arxiv.org/pdf/2108.07258.pdf).\n\nThese models, when trained on text data, are capable of learning context and meaning by tracking relationships in the input data. A successful application of transformer models is [large language models (LLMs)](https://www.nvidia.com/en-us/glossary/data-science/large-language-models/): algorithms trained with petabyte-scale, text-based data sets to recognize, predict, and generate various forms of content. 
Examples of LLMs include Google’s [PaLM 2](https://blog.google/technology/ai/google-palm-2-ai-large-language-model/), OpenAI’s [GPT series](https://openai.com/gpt-4) and Meta’s [Llama 2](https://ai.meta.com/llama/). According to [Ark Invest](https://ark-invest.com/big-ideas-2023/artificial-intelligence/), generative AI is expected to increase the productivity of knowledge workers more than fourfold by 2030.\n\nThe GitLab DevSecOps Platform leverages generative AI and LLMs to power [GitLab Duo](https://about.gitlab.com/gitlab-duo/), a suite of AI capabilities that enable organizations to boost efficiency and ship secure software faster. Now, let’s take a look at the capabilities of GitLab Duo.\n\n## Efficient & secure software development\n\nIn a research paper looking into the impact of AI on developer productivity published in February 2023, [Peng et al.](https://arxiv.org/abs/2302.06590) found that developers who used AI-powered tools completed their tasks 55.8% faster than those who didn’t. In this section, we'll look at how GitLab Duo can help teams reduce development time, improve developer productivity, and ensure secure software development.\n\n### Code Suggestions\nEfficient developer experience is critical to productivity, and GitLab Duo [Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html) improves the developer experience and leads to shorter cycle times. If you want to import dependencies in your favourite programming language or create a function along with unit tests, Code Suggestions automatically provides suggestions in your IDE with a high degree of accuracy.\n\nCode Suggestions is currently available in Beta for all tiers on GitLab.com and in self-managed GitLab from Version 16.1. 
You can [use Code Suggestions with the GitLab WebIDE, Visual Studio Code](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html), and [other IDEs](https://about.gitlab.com/blog/extending-code-suggestions/).\n\n### Code explanations\nDevelopers spend a ton of time on search engines trying to figure out what a block of code does or why it behaves the way it does. The [\"explain this code\"](https://docs.gitlab.com/ee/user/ai_features.html#explain-selected-code-in-the-web-ui) feature is currently an [experiment](https://docs.gitlab.com/ee/policy/experiment-beta-support.html#experiment) on GitLab.com and uses LLMs to explain code in natural language, including context based on the code selected for it to explain.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/xzsFfFqvlnU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n### AI-generated tests for code changes\nEnsuring code changes are thoroughly tested is hard work and can be time-consuming. In your merge requests, with generative AI, you can generate test files for your code to provide coverage for the change introduced, reducing the time spent writing tests.\n\n![Merge request test coverage with AI](https://about.gitlab.com/images/blogimages/merge-request-generate-tests-ai.gif)\n\n### Vulnerability explanations\nA critical part of preventing the escalation of vulnerabilities is understanding why vulnerabilities were discovered and how to fix them. This takes much effort to research before making an informed decision on the next action step. 
[GitLab Duo vulnerability recommendations](https://about.gitlab.com/blog/explain-this-vulnerability/) provide detailed information on identified vulnerabilities, including context from your code, how they can be exploited, and example fixes, thus allowing quick remediation of vulnerabilities.\n\n![Explain this vulnerability](https://about.gitlab.com/images/blogimages/2023-04-27-explain-this-vulnerability.png)\n\n### Value stream forecasting\n\nThe efficiency of the software development lifecycle is critical to a team’s productivity and quality of value delivery. It is critical for software leaders to identify trends from events occuring in the lifecycle and implement changes to improve efficiency. GitLab’s [Value Streams Dashboard](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html) allows teams to track software performance and the flow of value across the software development lifecycle with [DORA metrics](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html), [value stream analytics](https://docs.gitlab.com/ee/user/group/value_stream_analytics/index.html), and [vulnerability reports](https://gitlab.com/gitlab-org/gitlab/-/security/vulnerability_report).\n\n## Improved code review processes\n\n[In a 2019 survey](https://static1.smartbear.co/smartbearbrand/media/pdf/the-2019-state-of-code-review.pdf), 55% of respodents were either indifferent to or dissatisfied by their team’s code review process, and this is largely due to code reviews being seen as added work ([necessary but often time-consuming](https://codeclimate.com/blog/time-wasting-code-review)). In this section, we'll look at how GitLab Duo can help reduce review cycles and increase speed.\n\n### Merge request change summary\n\nOnce your changes are ready for review, you create a merge request, which allows you to collaborate with other stakeholders. 
Merge request descriptions often give the context on why the changes were made, but sometimes don’t include details of the changes themselves, except if you look at the code. Leveraging AI and LLMs, GitLab can provide relevant summaries of merge requests, reducing time spent ensuring the merge request description is up to date as changes evolve. All you need to do is use the `/summarize_diff` [quick action](https://docs.gitlab.com/ee/user/project/quick_actions.html) to add a summary of changes in a comment.\n\n![Merge request changes summary with AI](https://about.gitlab.com/images/blogimages/merge-request-changes-summary-ai.gif)\n\n### Issue comment summary\n\nWhen planning work, discussion is an essential element of collaboration. Discussions can become lengthy and catching up on all the comments can be a challenge, especially in an organization with an asynchronous culture. Similar to summarizing merge request changes, GitLab can [summarize issue comments](https://about.gitlab.com/blog/summarize-issues/), which is valuable in efficiently understanding the status of work from the issue discussions and the next steps to take. Here is a video of how it works:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/GMr3eHwbYAI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n### Suggested reviewers\nIdentifying reviewers can be time-consuming, especially with cross-functional collaborations in a large team. When enabled by project maintainers or owners, GitLab [suggests reviewers](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/index.html#suggested-reviewers) for a merge request based on the code changes made and the project’s contribution graph. 
This leads to higher-quality feedback from team members with domain knowledge and increases the speed of reviews.\n\n### Merge request review summary\nCode reviewers often leave feedback across several parts of the changes they’ve reviewed, then proceed to write a separate comment to give an overview of all the feedback given. The reviewer spends valuable time ensuring the merge request author has the context to understand the review. With GitLab Duo, the reviewer can simply use the [\"summarize my code review\"](/blog/summarize-my-merge-request-review/) capability to provide authors with context around the feedback, without the need to process the entire review.\n\n![Summarize code review](https://about.gitlab.com/images/blogimages/summarize-my-merge-request-review-ai.gif)\n\n## What's coming next\nGitLab is continuously iterating on our [AI-assisted capabilities](https://about.gitlab.com/blog/ai-ml-in-devsecops-series/) to find innovative ways to enable you to efficiently build more secure software faster, while putting privacy first in a single application that gives every stakeholder visibility. This includes AI [experiments](https://docs.gitlab.com/ee/policy/experiment-beta-support.html#experiment) such as [GitLab Duo Chat](https://docs.gitlab.com/ee/user/ai_features.html#gitlab-duo-chat), which uses generative AI to answer product-specific questions about GitLab, reducing time spent reviewing the GitLab documentation. Another experiment is the \"fill in merge request description\" capability, which uses AI and content from the proposed changes to fill in a merge request decription template.\n\n![Summarize MR Descriptions](https://about.gitlab.com/images/blogimages/summarize-mr-description.gif)\n\nGitLab Duo’s AI-assisted workflows enable teams in every phase of the software development lifecycle to deliver secure software faster with increased efficiency and reduced cycle times. 
Learn more about [GitLab Duo here](https://about.gitlab.com/gitlab-duo/).\n",[716,496,9],{"slug":2999,"featured":6,"template":693},"supercharge-productivity-with-gitlab-duo","content:en-us:blog:supercharge-productivity-with-gitlab-duo.yml","Supercharge Productivity With Gitlab Duo","en-us/blog/supercharge-productivity-with-gitlab-duo.yml","en-us/blog/supercharge-productivity-with-gitlab-duo",{"_path":3005,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3006,"content":3012,"config":3018,"_id":3020,"_type":14,"title":3021,"_source":16,"_file":3022,"_stem":3023,"_extension":19},"/en-us/blog/the-gitlab-ai-security-framework-for-security-leaders",{"title":3007,"description":3008,"ogTitle":3007,"ogDescription":3008,"noIndex":6,"ogImage":3009,"ogUrl":3010,"ogSiteName":706,"ogType":707,"canonicalUrls":3010,"schema":3011},"The GitLab AI Security Framework for security leaders","Discover how GitLab Duo's security controls, third-party integrations, and retention policies help teams safely implement AI into their development workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664299/Blog/Hero%20Images/AdobeStock_887599633.jpg","https://about.gitlab.com/blog/the-gitlab-ai-security-framework-for-security-leaders","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The GitLab AI Security Framework for security leaders\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kyle Smith\"},{\"@type\":\"Person\",\"name\":\"Ayoub Fandi\"}],\n        \"datePublished\": \"2025-03-04\",\n      }",{"title":3007,"description":3008,"authors":3013,"heroImage":3009,"date":3015,"body":3016,"category":10,"tags":3017},[3014,796],"Kyle Smith","2025-03-04","As companies rapidly adopt AI technologies, CISOs face a new frontier of security challenges. 
Many security leaders find themselves grappling with unfamiliar questions: How do we evaluate AI vendors differently from traditional software vendors? What security controls matter most? Where does vendor responsibility end and customer responsibility begin? How do we evaluate AI security risks within the context of the service provided? To help answer these questions, we’ve created the [GitLab AI Security Framework](https://trust.gitlab.com/?itemUid=ad3d92c1-889e-49fc-b19c-2434f70071ee&source=click) to show security leaders how GitLab and customers enable secure AI-powered development using GitLab Duo.\n\n## The genesis of AI security challenges\n\nFrom conversations with security leaders across industries a pattern has emerged: Organizations are rapidly embracing AI technologies to improve delivery while their security teams struggle to establish appropriate security controls. \n\nThis disconnect isn't just a matter of resources or expertise – it represents a fundamental shift in how organizations need to approach security in the AI era. Security leaders are witnessing quick and unprecedented adoption of AI across their organizations, from development teams using coding assistants to marketing departments leveraging generative AI. \n\nWhile organizations are integrating AI within their own software, many of their current vendor-provided SaaS applications have added AI capabilities as well. Although this adoption drives innovation and efficiency, it also creates a complex set of security considerations that traditional frameworks weren't designed to address. Below are some of the specific challenges we’ve identified.\n\n## Security challenges in the AI era\n\n**1. Responsibility and control uncertainty**\n\nThe rapid pace of AI adoption has left many organizations without a coherent security governance strategy. Security teams find themselves trying to retrofit existing security frameworks to address AI-specific concerns. 
Security leaders face challenges in understanding where their responsibilities begin and end when it comes to AI security. The traditional vendor-customer relationship becomes more complex with AI systems, as data flows, model training, and inference processes create new types of interactions and dependencies. \n\n**2. Risk assessment evolution**\n\nTraditional security risk models struggle to capture the unique characteristics of AI systems. Security leaders are finding that standard risk assessment frameworks don't adequately address AI-specific risks. AI security risks will differ based on AI implementation and the context in which it’s used. The challenge is compounded by the need to evaluate AI vendors without necessarily having deep technical AI expertise on the security team.\n\n**3. Data protection complexities**  \nAI systems present unique challenges for data protection. The way these systems process, learn from, and generate data creates new privacy and security considerations that organizations should carefully evaluate. CISOs must ensure their data governance frameworks evolve to address how AI systems use and protect sensitive information. AI implementations with inadequate safeguards might inadvertently reveal protected information via AI generated outputs.\n\n**4. Compliance and standards navigation**  \nThe regulatory landscape for AI security is rapidly evolving, with new standards like ISO 42001 and others emerging alongside existing frameworks. Security leaders must navigate this complex environment while ensuring their AI implementations remain compliant with both current and anticipated regulations. 
This requires a delicate balance between enabling AI adoption and maintaining robust security controls that satisfy regulatory requirements.\n\n## Addressing these challenges  \nWith the release of [GitLab Duo](https://about.gitlab.com/gitlab-duo/), we recognized these executive-level concerns and developed a comprehensive framework to help organizations navigate AI security in the context of our AI-powered DevSecOps platform. Our AI Security Framework provides details on our privacy-first implementation of AI to enable GitLab Duo, and how we validate the security of our AI vendors. A responsibility matrix is included to help security leaders manage their AI security responsibilities while enabling their organizations to innovate safely. We also compiled a selection of AI-specific security risks to keep in mind and highlighted how GitLab capabilities like [prompt guardrails](https://about.gitlab.com/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers/) can help in mitigating them. \n\n> Want a deeper look at our security controls? 
Check out our [AI Security Framework](https://trust.gitlab.com/?itemUid=ad3d92c1-889e-49fc-b19c-2434f70071ee&source=click).\n\n## Learn more\n- [GitLab AI Transparency Center](https://about.gitlab.com/ai-transparency-center/)\n- [How GitLab uses prompt guardrails to help protect customers](https://about.gitlab.com/blog/how-gitlab-uses-prompt-guardrails-to-help-protect-customers/)\n- [Improve AI security in GitLab with composite identities](https://about.gitlab.com/blog/improve-ai-security-in-gitlab-with-composite-identities/)\n- [Secure, compliant, and AI-powered: Get to know 3 new GitLab features](https://about.gitlab.com/blog/secure-compliant-and-ai-powered-get-to-know-3-new-gitlab-features/)\n- [ICYMI: Key AI and security insights from our developer community](https://about.gitlab.com/blog/icymi-key-ai-and-security-insights-from-our-developer-community/)",[9,717,496,691],{"slug":3019,"featured":91,"template":693},"the-gitlab-ai-security-framework-for-security-leaders","content:en-us:blog:the-gitlab-ai-security-framework-for-security-leaders.yml","The Gitlab Ai Security Framework For Security Leaders","en-us/blog/the-gitlab-ai-security-framework-for-security-leaders.yml","en-us/blog/the-gitlab-ai-security-framework-for-security-leaders",{"_path":3025,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3026,"content":3032,"config":3037,"_id":3039,"_type":14,"title":3040,"_source":16,"_file":3041,"_stem":3042,"_extension":19},"/en-us/blog/the-road-to-smarter-code-reviewer-recommendations",{"title":3027,"description":3028,"ogTitle":3027,"ogDescription":3028,"noIndex":6,"ogImage":3029,"ogUrl":3030,"ogSiteName":706,"ogType":707,"canonicalUrls":3030,"schema":3031},"The road to smarter code reviewer recommendations","Machine learning is coming to GitLab's code review process. 
Here's what you need to know, and how you can help!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668426/Blog/Hero%20Images/retrospectivesgitlabpost.jpg","https://about.gitlab.com/blog/the-road-to-smarter-code-reviewer-recommendations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The road to smarter code reviewer recommendations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2022-01-04\",\n      }",{"title":3027,"description":3028,"authors":3033,"heroImage":3029,"date":3034,"body":3035,"category":1201,"tags":3036},[1455],"2022-01-04","\nYou may recall back in June 2021, we [announced the acquisition of UnReview](/press/releases/2021-06-02-gitlab-acquires-unreview-machine-learning-capabilities/), a machine learning (ML) based solution for automatically identifying appropriate expert [code reviewers](/stages-devops-lifecycle/create/) and controlling review workloads and distribution of knowledge.\n\nAt the start of the new year we wanted to provide an update on our integration progress and our wider vision of leveraging machine learning to make GitLab's [DevOps Platform](/solutions/devops-platform/) smarter. You see, the acquisition of UnReview also was the initial staffing of [our new ModelOps stage](/direction/modelops/).\n\n### Our Newest DevOps Stage\n\nThis new stage, which we’ve named ModelOps, is focused on enabling and empowering data science workloads on GitLab. GitLab ModelOps aims to bring data science into GitLab both within existing features to make them smarter and more intelligent, but also empowering GitLab customers to build and integrate data science workloads within GitLab.\n\nSo what is ModelOps you may wonder? We view ModelOps as an all encompassing term to cover the entire end to end lifecycle of artificial intelligence models. 
We wanted to set our vision wide to fully cover everything needed to power data science workloads. DataOps is the processing of data workloads (think traditional ELT: extract, load, transform) and MLOps is the building, training, and deployment of machine learning models. If you’re confused don’t worry, it’s a lot to wrap your head around.\n\n![a look at the stages of MLOps](https://about.gitlab.com/images/blogimages/MLops.png){: .shadow.small.center}\n\nToday our DevOps Platform helps plan, build, test, secure, deploy, and monitor traditional software. Now we want to extend our DevOps Platform to include AI and ML workloads. If this is interesting to you, be sure to check out our recent Contribute talk where we dive deeper into plans for our ModelOps stage.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/C08QVI99JLo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### UnReview as our first feature\n\nSo what does this have to do with UnReview? Our acquisition of UnReview is going to be our first [AI Assisted](/direction/ai-powered/) group’s feature: suggested reviewers within [GitLab’s existing reviewers experience](/blog/merge-request-reviewers/). Today, a developer in a merge request has to manually choose a reviewer to look at their code. With UnReview we can leverage the contribution history for a project and recommend someone well-suited for code review of your specific changes.\n\nHere’s an early mockup (and it may differ from our final UI) of how we’re thinking about this integration:\n\n![an early mockup of our UI](https://about.gitlab.com/images/blogimages/codereviewmockup.png){: .shadow.small.left}\n\nThe UnReview algorithm looks at a variety of data points from your project’s contribution history to suggest an appropriate reviewer. 
We’re still in the early days of this integration but our initial internal testing shows great suggestions.\n\n### Customer beta coming soon!\n\nThis leads me to a final question, might you want to be one of our first customers to try this new code review experience? In early 2022, we’ll begin a private customer beta of this new functionality. If interested, [fill out this form to express interest](https://docs.google.com/forms/d/e/1FAIpQLScpmCwpwyBr0GrXxBQ6vE02eokclFAs9lFk_g5dcyuGaHqFuQ/viewform). Do note that we can’t accept everyone and we’ll focus initially on customer profiles that are well suited for the initial version of the suggestion algorithm. Our only ask is we’d like to find customers with active projects that have a healthy number of contributors. The model currently works best on larger repositories with lots of contributors where it may not immediately be clear who is an ideal code reviewer.\n\nWe can’t wait for customers to begin using this new reviewer suggestion experience and will be providing more updates in early 2022.\n",[917,233,759,9],{"slug":3038,"featured":6,"template":693},"the-road-to-smarter-code-reviewer-recommendations","content:en-us:blog:the-road-to-smarter-code-reviewer-recommendations.yml","The Road To Smarter Code Reviewer Recommendations","en-us/blog/the-road-to-smarter-code-reviewer-recommendations.yml","en-us/blog/the-road-to-smarter-code-reviewer-recommendations",{"_path":3044,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3045,"content":3051,"config":3056,"_id":3058,"_type":14,"title":3059,"_source":16,"_file":3060,"_stem":3061,"_extension":19},"/en-us/blog/there-is-no-mlops-without-devsecops",{"title":3046,"description":3047,"ogTitle":3046,"ogDescription":3047,"noIndex":6,"ogImage":3048,"ogUrl":3049,"ogSiteName":706,"ogType":707,"canonicalUrls":3049,"schema":3050},"Building GitLab with GitLab: Why there is no MLOps without DevSecOps","Follow along as data scientists adopt DevSecOps practices and enjoy the benefits of 
automation, repeatable workflows, standardization, and automatic provisioning of infrastructure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659740/Blog/Hero%20Images/building-gitlab-with-gitlab-no-type.png","https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: Why there is no MLOps without DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2023-10-05\",\n      }",{"title":3046,"description":3047,"authors":3052,"heroImage":3048,"date":3053,"body":3054,"category":10,"tags":3055},[2473],"2023-10-05","Building predictive models requires a good amount of experimentation and\niterations. Data scientists building those models usually implement\nworkflows involving several steps such as data loading, processing,\ntraining, testing, and deployment. 
Such workflows or data science pipelines\ncome with a set of challenges on their own; some of these common challenges\nare:\n\n- prone to error due to manual steps\n\n- experimentation results that are hard to replicate\n\n- long training time of machine learning (ML) models \n\n\nWhen there is a challenge, there is also an opportunity; in this case, those\nchallenges represent an opportunity for data scientists to adopt DevSecOps\npractices and enjoy the benefits of automation, repeatable workflows,\nstandardization, and automatic provisioning of infrastructure needed for\ndata-driven applications at scale.\n\n\nThe [Data Science team at\nGitLab](https://about.gitlab.com/handbook/business-technology/data-team/organization/data-science/)\nis now utilizing the GitLab DevSecOps Platform in their workflows,\nspecifically to:\n\n- enhance experiment reproducibility by ensuring code and data execute in a\nstandardized container image\n\n- automate training and re-training of ML models with GPU-enabled CI/CD\n\n- leverage ML experiment tracking, storing the most relevant metadata and\nartifacts produced by data science pipelines automated with CI\n\n\nAt GitLab, we are proponents of \"dogfooding\" our platform and sharing how we\nuse GitLab to build GitLab. What follows is a detailed look at the Data\nScience team's experience.\n\n\n### Enhancing experiment reproducibility \n\nA baseline step to enhance reproducibility is having a common and standard\nexperiment environment for all data scientists to run experiments in their\nJupyter Notebooks. A standard data science environment ensures that all team\nmembers use the same software dependencies. A way to achieve this is by\nbuilding a container image with all the respective dependencies under\nversion control and re-pulling it every time a new version of the code is\nrun. 
This process is illustrated in the figure below:\n\n\n![build](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/build-2.png)\n\nData science image of automatic build using GitLab CI \n\n{: .note.text-center}\n\n\nYou might wonder if the image gets built every time there is a new commit.\nThe answer is \"no\" since that would result in longer execution times, and\nthe image dependencies versions don’t change frequently, rendering it\nunnecessary to build it every time there is a new commit. Therefore, once\nthe standard image is automatically built by the pipeline, it is pushed to\nthe GitLab Container Registry, where it is stored and ready to be pulled\nevery time changes to the model code are introduced, and re-training is\nnecessary.\n\n\n![registry](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/registry.png)\n\nGitLab Container Registry with image automatically built and pushed by a CI\npipeline\n\n{: .note.text-center}\n\n\nChanges to the image dependencies or Dockerfile require a [merge\nrequest](https://docs.gitlab.com/ee/user/project/merge_requests/) and an\napproval process.\n\n\n### How to build the data science image using GitLab CI/CD\n\nConsider this project structure:\n\n\n```\n\nnotebooks/\n\n.gitlab-ci.yml\n\nDockerfile\n\nconfig.yml\n\nrequirements.txt\n\n```\n\nGitLab's Data Science team already had a pre-configured JupyterLab image\nwith packages such as [gitlabds](https://pypi.org/project/gitlabds/1.0.0/)\nfor common data preparation tasks and modules to enable Snowflake\nconnectivity for loading raw data. All these dependencies are reflected in\nthe Dockerfile at the root of the project, plus all the steps necessary to\nbuild the image: \n\n\n```\n\nFROM nvcr.io/nvidia/cuda:12.1.1-base-ubuntu22.04\n\nCOPY .    
/app/\n\nWORKDIR /app\n\nRUN apt-get update\n\nRUN apt-get install -y python3.9\n\nRUN apt-get install -y python3-pip\n\nRUN pip install -r requirements.txt\n\n```\n\n\nThe instructions to build the data science image start with using Ubuntu\nwith CUDA drivers as a base image. We are using this baseline image because,\nmoving forward, we will use GPU hardware to train models. The rest of the\nsteps include installing Python 3.9 and the dependencies listed in\n`requirements.txt` with their respective versions. \n\n\nAutomatically building the data science image using [GitLab\nCI/CD](https://about.gitlab.com/topics/ci-cd/) requires us to create the\n`.gitlab-ci.yml ` at the root of the project and use it to describe the jobs\nwe want to automate. For the time being, let’s focus only on the\n`build-ds-image`job:\n\n\n```\n\nvariables:\n  DOCKER_HOST: tcp://docker:2375\n  MOUNT_POINT: \"/builds/$CI_PROJECT_PATH/mnt\"\n  CONTAINER_IMAGE: \"$CI_REGISTRY_IMAGE/main-image:latest\"\n\nstages:\n    - build\n    - train\n    - notify\ninclude:\n  - template: 'Workflows/MergeRequest-Pipelines.gitlab-ci.yml'\nworkflow:\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n    - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS\n      when: never\n\nbuild-ds-image:\n  tags: [ saas-linux-large-amd64 ]\n  stage: build\n  services:\n    - docker:20.10.16-dind\n  image:\n    name: docker:20.10.16\n  script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker build -t $CONTAINER_IMAGE .\n    - docker push $CONTAINER_IMAGE\n  rules:\n    - if: '$CI_PIPELINE_SOURCE == \"merge_request_event\" && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH'\n      changes:\n       - Dockerfile\n       - requirements.txt\n\n  allow_failure: true\n```\n\n\nAt a high level, the job `build-ds-image`:\n\n- uses a docker-in-docker service (dind) necessary to create docker images\nin GitLab CI/CD.\n\n- uses [predefined variables](link) to log 
into the GitLab Container\nRegistry, build the image, tag it using $CONTAINER_IMAGE variable, and push\nit to the registry. These steps are declared in the script section lines.\n\n- leverages a  `rules` section to evaluate conditions to determine if the\njob should be created. In this case, this job runs only if there are changes\nto the Dockerfile and requirements.txt file and if those changes are created\nusing a merge request.\n\n\nThe conditions declared in `rules` helps us optimize the pipeline running\ntime since the image gets rebuilt only when necessary.\n\n\nA complete pipeline can be found in this example project, along with\ninstructions to trigger the automatic creation of the data science image:\n[Data Science CI\npipeline](https://gitlab.com/gitlab-data/data-science-ci-example/-/blob/main/.gitlab-ci.yml?ref_type=heads).\n\n\n### Automate training and re-training of ML models with GPU-enabled CI/CD\n\nGitLab offers the ability to leverage GPU hardware and, even better, to get\nthis hardware automatically provisioned to run jobs declared in the\n.gitlab-ci.yml file. We took advantage of this capability to train our ML\nmodels faster without spending time setting up or configuring graphics card\ndrivers. 
Using GPU hardware ([GitLab\nRunners](https://docs.gitlab.com/ee/ci/runners/saas/gpu_saas_runner.html))\nrequires us to add this line to the training job: \n\n\n```\n\ntags:\n        - saas-linux-medium-amd64-gpu-standard\n```\n\n\nThe tag above will ensure that a GPU GitLab Runner automatically picks up\nevery training job.\n\nLet’s take a look at the entire training job in the .gitlab-ci.yml file and\nbreak down what it does:\n\n\n```\n\ntrain-commit-activated:\n    stage: train\n    image: $CONTAINER_IMAGE\n    tags:\n        - saas-linux-medium-amd64-gpu-standard\n    script:\n        - echo \"GPU training activated by commit message\"\n        - echo \"message passed is $CI_COMMIT_MESSAGE\"\n        - notebookName=$(echo ${CI_COMMIT_MESSAGE/train})\n        - echo \"Notebook name $notebookName\"\n        - papermill -p is_local_development False -p tree_method 'gpu_hist' $notebookName -\n    rules:\n        - if: '$CI_COMMIT_BRANCH == \"staging\"'\n          when: never\n        - if: $CI_COMMIT_MESSAGE =~ /\\w+\\.ipynb/\n          when: always\n          allow_failure: true\n    artifacts:\n      paths:\n        - ./model_metrics.md\n````\n\n\nLet’s start with this block:\n\n\n```\n\ntrain-commit-activated:\n    stage: train\n    image: $CONTAINER_IMAGE\n    tags:\n        - saas-linux-medium-amd64-gpu-standard\n```\n\n\n- **train-commit-activated** This is the name of the job. Since the model\ntraining gets activated given a specific pattern in the commit message, we\nuse a descriptive name to easily identify it in the larger pipeline.\n\n- **stage: train** This specifies the pipeline stage where this job belongs.\nIn the first part of the CI/CD configuration, we defined three stages for\nthis pipeline: `build`, `train`,  and `notify`. This job comes after\nbuilding the data science container image. 
The order is essential since we\nfirst need the image built to run our training code in it.\n\n- **image: $CONTAINER_IMAGE** Here, we specify the Docker image built in the\nfirst job that contains the CUDA drivers and necessary Python dependencies\nto run this job. $CONTAINER_IMAGE is a user-defined variable specified in\nthe variables section of the .gitlab-ci.yml file. \n\n- **tags: saas-linux-medium-amd64-gpu-standard** As mentioned earlier, using\nthis line, we ask GitLab to automatically provision a GPU-enabled Runner to\nexecute this job.\n\n\nThe second block of the job:\n\n\n```\n\nscript:\n        - echo \"GPU training activated by commit message\"\n        - echo \"message passed is $CI_COMMIT_MESSAGE\"\n        - notebookName=$(echo ${CI_COMMIT_MESSAGE/train})\n        - echo \"Notebook name $notebookName\"\n        - papermill -p is_local_development False -p tree_method 'gpu_hist' $notebookName -\n```\n\n\n- **script** This section contains the commands in charge of running the\nmodel training. The execution of this job is conditioned to the contents of\nthe  commit message. The commit message must have the name of the Jupyter\nNotebook that contains the actual model training code.\n\n\nThe rationale behind this approach is that we wanted to keep the data\nscientist workflow as simple as possible. The team had already adopted the\n[modeling\ntemplates](https://gitlab.com/gitlab-data/data-science/-/tree/main/templates)\nto start building predictive models quickly. Plugging the CI pipeline into\ntheir modeling workflow was a priority to ensure productivity would remain\nintact. 
With these steps:\n\n\n```\n\nnotebookName=$(echo ${CI_COMMIT_MESSAGE/train})\n        - echo \"Notebook name $notebookName\"\n        - papermill -p is_local_development False -p tree_method 'gpu_hist' $notebookName -\n```\n\n\nThe CI pipeline captures the name of the Jupyter Notebook with the training\nmodeling template and passes parameters to ensure\n[XGBoost](https://xgboost.readthedocs.io/en/stable/) uses the provisioned\nGPU. You can find an example of the Jupyter modeling template that is\nexecuted in this job\n[here](https://gitlab.com/gitlab-data/data-science-ci-example/-/blob/main/notebooks/training_example.ipynb?ref_type=heads).\n\n\nOnce the data science image is built, it can be reutilized in further model\ntraining jobs. The `train-commit-activated` job pulls the image from the\nGitLab Container Registry and utilizes it to run the ML pipeline defined in\nthe training notebook. This is illustrated in the `CI Job - Train model` in\nthe figure below:\n\n\n![training](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/training_job.png)\n\nTraining job executes ML pipeline defined in the modeling notebook\n\n{: .note.text-center}\n\n\nSince our image contains CUDA drivers and GitLab automatically provisions\nGPU-enabled hardware, the training job runs significantly faster with\nrespect to standard hardware.\n\n\n### Using GitLab ML experiment tracker\n\nEach model training execution triggered using GitLab CI is an experiment\nthat needs tracking. 
Using Experiment tracking in GitLab helps us to record\nmetadata that comes in handy to compare model performance and collaborate\nwith other data scientists by making result experiments available for\neveryone and providing a detailed history of the model development.\n\n\n![experiments](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/experiments.png)\n\nExperiments automatically logged on every CI pipeline GPU training run \n\n{: .note.text-center}\n\n\nEach model artifact created can be traced back to the pipeline that\ngenerated it, along with its dependencies:\n\n\n![traceability](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/traceability_small.png)\n\nModel traceability from pipeline run to candidate details\n\n{: .note.text-center}\n\n\n### Putting it all together\n\nWhat is machine learning without data to learn from? We also leveraged the\n[Snowflake](https://www.snowflake.com/en/) connector in the model training\nnotebook and automated the data extraction whenever the respective commit\ntriggers a training job. 
Here is an architecture of the current solution\nwith all the parts described in this blog post:\n\n\n![process](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/training_fixed.png)\n\nData Science pipelines automated using GitLab DevSecops Platform\n\n{: .note.text-center}\n\n\n| Challenge | Solution |\n\n| ------ | ------ | \n\n|Prone to error due to manual steps | Automate steps with [GitLab\nCI/CD](https://docs.gitlab.com/ee/ci/) |\n\n|Experimentation results that are hard to replicate    |  Record metadata\nand model artifacts with [GitLab Experiment\nTracker](https://docs.gitlab.com/ee/user/project/ml/experiment_tracking/)   \n|\n\n|The long training time of machine learning models     |  Train models with\n[GitLab SaaS GPU\nRunners](https://docs.gitlab.com/ee/ci/runners/saas/gpu_saas_runner.html)  |\n\n\nIterating on these challenges is a first step towards MLOps, and we are at\nthe tip of the iceberg; in coming iterations, we will adopt security\nfeatures to ensure model provenance (software bill of materials) and code\nquality, and to monitor our ML workflow development with value stream\ndashboards. 
But so far, one thing is sure: **There is no MLOps without\nDevSecOps**.\n\n\nGet started automating your data science pipelines, follow this\n[tutorial](https://about.gitlab.com/handbook/business-technology/data-team/platform/ci-for-ds-pipelines/)\nand clone this\n[data-science-project](https://gitlab.com/gitlab-data/data-science-ci-example)\nto follow along and watch this demo of using GPU Runners to train\n[XGBoost](https://xgboost.readthedocs.io/en/stable/) model.\n\n\nSee how data scientists can train ML models with GitLab GPU-enabled Runners\n(XGBoost 5-minute demo):\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/tElegG4NCZ0?si=L1IZfx_UGv6u81Gk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## More \"Building GitLab with GitLab\" blogs\n\nRead more of our \"Building GitLab with GitLab\" series:\n\n- [How we use Web API fuzz\ntesting](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/)\n\n- [How GitLab.com inspired GitLab\nDedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)\n",[737,716,496,9],{"slug":3057,"featured":6,"template":693},"there-is-no-mlops-without-devsecops","content:en-us:blog:there-is-no-mlops-without-devsecops.yml","There Is No Mlops Without Devsecops","en-us/blog/there-is-no-mlops-without-devsecops.yml","en-us/blog/there-is-no-mlops-without-devsecops",{"_path":3063,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3064,"content":3069,"config":3075,"_id":3077,"_type":14,"title":3078,"_source":16,"_file":3079,"_stem":3080,"_extension":19},"/en-us/blog/top-10-gitlab-technical-blogs-of-2023",{"title":3065,"description":3066,"ogTitle":3065,"ogDescription":3066,"noIndex":6,"ogImage":2375,"ogUrl":3067,"ogSiteName":706,"ogType":707,"canonicalUrls":3067,"schema":3068},"Top 10 GitLab technical blogs of 2023","2023 was a big year! 
Catch up on expert insights into DevSecOps, AI, CI/CD, and more.","https://about.gitlab.com/blog/top-10-gitlab-technical-blogs-of-2023","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 10 GitLab technical blogs of 2023\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2024-01-09\",\n      }",{"title":3065,"description":3066,"authors":3070,"heroImage":2375,"date":3072,"body":3073,"category":1201,"tags":3074},[3071],"Sandra Gittlen","2024-01-09","2023 brought fresh insights from experts across GitLab and beyond —  all of them focused on the challenges and opportunities facing DevSecOps teams. From Lockheed Martin to CARFAX, organizations are trying to understand and unlock the power of technologies such as artificial intelligence (AI), CI/CD, security automation, and more. Our experts provided tips, best practices, and tutorials to use throughout the software development lifecycle.\n\nHere are the top 10 technical blogs from what was an incredible year in DevSecOps innovation.\n\n**1. [Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment](https://about.gitlab.com/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment/)**\nLooking for a smooth transition from Jenkins to GitLab? Follow this step-by-step tutorial to learn how GitLab's integrated CI/CD capabilities help deliver high-quality software faster.\n\n**2. [U.S. Navy Black Pearl: Lessons in championing DevSecOps](https://about.gitlab.com/blog/u-s-navy-black-pearl-lessons-in-championing-devsecops/)**\nSigma Defense's director of engineering details what it's like to manage the U.S. Navy's Black Pearl, which uses GitLab as its DevSecOps platform. The DevSecOps champion relays his experience implementing DevSecOps and the benefits of that decision.\n\n**3. 
[Quickstart guide for GitLab Remote Development workspaces](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/)**\nEnabling developers to work in their preferred environments empowers DevSecOps teams to build and deliver software more efficiently. With these quickstart instructions, developers can create a workspace, use the Web IDE Terminal to install dependencies or start their server, and view their running application.\n\n**4. [Introducing the GitLab CI/CD Catalog Beta](https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta/)**\nCI/CD catalogs are a game-changer, allowing developers to discover, integrate, and share pre-existing CI/CD components with ease. This tutorial shows how to get the most from this new DevSecOps platform feature.\n\n**5. [Combine GitLab Flow and GitLab Duo for a workflow powerhouse](https://about.gitlab.com/blog/gitlab-flow-duo/)**\nGitLab Flow and GitLab Duo can help organizations achieve significant improvements in end-to-end workflow efficiency that can lead to higher levels of productivity, deployment frequency, code quality and overall security, and production resiliency and availability. Find out how with this step-by-step guide.\n\n**6. [Efficient DevSecOps workflows: Hands-on python-gitlab API automation](https://about.gitlab.com/blog/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation/)**\nThe python-gitlab library is a useful abstraction layer for the GitLab API. Dive into hands-on examples and best practices in this tutorial.\n\n**7. [Building GitLab with GitLab: Why there is no MLOps without DevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)**\nAt GitLab, we believe in the power of MLOps, especially when combined with DevSecOps. So follow along as our data scientists adopt DevSecOps practices and enjoy the benefits of automation, repeatable workflows, standardization, and automatic provisioning of infrastructure.\n\n**8. 
[Explore the Dragon Realm: Build a C++ adventure game with a little help from AI](https://about.gitlab.com/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions/)**\nReaders are invited to create a mystical world while learning how to integrate AI into their coding environment. This tutorial demonstrates how to use GitLab Duo Code Suggestions to create a text-based adventure game, including magical locations to visit and items to procure, using C++. \n\n**9. [How GitLab's Red Team automates C2 testing](https://about.gitlab.com/blog/how-gitlabs-red-team-automates-c2-testing/)**\nThe GitLab Red Team conducts security exercises that simulate real-world threats. They apply professional development practices to using the same open source C2 tools as threat actors. In this tutorial, the GitLab Red Team shares how they implement continuous testing for the Mythic framework, their design philosophy, and a public project that can be forked for use by other Red Teams.\n\n**10. [Building GitLab with GitLab: How GitLab.com inspired Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)**\nThe design of GitLab Dedicated, our single-tenancy SaaS version of the DevSecOps platform, came from the lessons learned while building GitLab.com. 
In this peek behind the curtains, learn the considerations that sparked different decisions regarding automation, databases, monitoring, availability, and more – and what the outcome was.\n\nSign up for the GitLab newsletter using the form to the right to receive the latest blogs right in your inbox.\n",[9,109,496,716,691,737],{"slug":3076,"featured":91,"template":693},"top-10-gitlab-technical-blogs-of-2023","content:en-us:blog:top-10-gitlab-technical-blogs-of-2023.yml","Top 10 Gitlab Technical Blogs Of 2023","en-us/blog/top-10-gitlab-technical-blogs-of-2023.yml","en-us/blog/top-10-gitlab-technical-blogs-of-2023",{"_path":3082,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3083,"content":3088,"config":3093,"_id":3095,"_type":14,"title":3096,"_source":16,"_file":3097,"_stem":3098,"_extension":19},"/en-us/blog/top-10-ways-machine-learning-may-help-devops",{"title":3084,"description":3085,"ogTitle":3084,"ogDescription":3085,"noIndex":6,"ogImage":3029,"ogUrl":3086,"ogSiteName":706,"ogType":707,"canonicalUrls":3086,"schema":3087},"Top 10 ways machine learning may help DevOps","Is machine learning part of your DevOps plan? Here are some ways ML could fit right in.","https://about.gitlab.com/blog/top-10-ways-machine-learning-may-help-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 10 ways machine learning may help DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-02-14\",\n      }",{"title":3084,"description":3085,"authors":3089,"heroImage":3029,"date":3090,"body":3091,"category":2204,"tags":3092},[852],"2022-02-14","_This post is meant as a general introduction to DevOps and machine learning, but does not represent GitLab’s roadmap with ModelOps. 
Read more about [our ModelOps plans](/blog/introducing-modelops-to-solve-data-science-challenges/)._\n\nLike a superhero’s cape, machine learning can enhance the innate powers of your DevOps program. \n\nYes, it’s early days, and no, machine learning can’t do everything you may want it to – yet. But if you [start using ML tools now](/topics/devops/the-role-of-ai-in-devops/), you’ll be poised to make it a full-fledged participant in your DevOps team as the technology continues to mature. Here are some things ML can help with today.\n\n1. **Make sense of your test data.** Whether it’s regression, unit, functional, or user acceptance testing, ML can help sort through the data generated from those tests, find patterns, figure out the coding problems that caused any bugs, and alert the troops. \n\n2. **Manage your help-desk alerts.** You can teach ML about the factors that make up different types of alerts and automatically route alerts to the best-qualified (mostly human) problem-solver, be it the service desk or a networking guru. Some ML systems can also fix problems without human intervention, based on rules you create.\n\n3. **Put the security into “DevSecOps.”** ML algorithms can, in real time, look through the massive amount of information generated from your security software and network logs and determine if there’s a breach long before a human could. The ML software compares the usual network-traffic baseline to what it’s seeing currently and detects when there’s an attack, or it can tell you if the amount of code in an app or system has suddenly grown to double its size when it shouldn’t have. ML can also triage the problems it finds, as well as take actions to correct security issues based on your guidelines. Further, ML tools can also help ensure your governance rules are followed and create a detailed audit trail.\n\n4. 
**Gather user requirements.** Natural language processing has come a long way, and can collect, validate, and track documents to streamline the process of figuring out what users are asking for. The technology can also help detect incomplete requirements or wonky timelines and can translate user wants and needs into highly technical project requirements. This makes the entire project-management process more efficient.\n\n5. **Help with pesky dev details.** No, not to replace developers, of course – at least not yet. But ML can learn from past apps you’ve created to recommend security guardrails and how to make software scale and perform better, among other things. Developers definitely see this trend coming, and in [GitLab’s 2021 Global DevSecOps Survey](/developer-survey/), around a third said that an understanding of AI or ML is the most important skill for their future careers. ML-powered code completion tools are already on the market, which provide suggestions for app developers.\n\n6. **Automate testing and create test data.** ML can automatically create the tests you need for QA and the test cases they’re based on, generate and manage test data, and automate code reviews. Natural language processing can help you review test cases and eliminate duplicates, as well as identify gaps in test coverage. Teams will continue to use machine learning models to [make test automation smarter](https://www.forrester.com/blogs/predictions-2021-software-developers-face-mounting-pressure/) , Forrester Research predicts.\n\n7. **Reduce complexity and allow better communication throughout the software chain.** ML can smooth out the rough edges among teams responsible for different parts of the process, and act as an Esperanto of sorts to allow people to speak to each other using the same language. No more, “It worked on my machine.” \n\n8. 
**Save time on manual provisioning.** Sure the cloud makes this easier, but ML can provision what it thinks you’ll need before you actually need it. \n\n9. **Improve software and product quality.** ML can help find issues like resource leaks, wasted CPU cycles, and other problems, so you can optimize your code before it hits production. At Facebook, [a bug detection tool](https://www2.deloitte.com/us/en/insights/focus/signals-for-strategists/ai-assisted-software-development.html/#:~:text=AI%20is%20helping%20to%20make%20better%20software%20Professionals%20are%20using,in%20design,%20development,%20and%20deployment&text=Artificial%20intelligence%20isn't%20writing,develop%20and%20test%20custom%20software.) predicts defects and suggests remedies that prove correct 80% of the time, Deloitte reports. And the IEEE ran a study from Google X about an ML method that [predicts failures of individual components](https://ieeexplore.ieee.org/document/7448033) that was “far more accurate than the traditional MTBF approach.” \n\n10. **Integrate your workflows and allow continuous improvement.** Some DevOps teams are using ML to analyze all development, operational, and test tools to find any gaps, as well as where pieces of the pipeline need to be better integrated and where APIs are still needed. ML algorithms can help teams figure out why some projects go very well, and others don’t. You can use ML to monitor your monitors and make sure they’re fully operational. Further, ML continues to learn from its training models – both the ones you provide and those it learns on its own as it goes – to continue to help you provide better products and services over time. And when you get down to it, isn’t that the whole point of technology?\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) is out now! 
Learn the latest in DevOps insights from over 5,000 DevOps professionals._",[917,834,2014,9],{"slug":3094,"featured":6,"template":693},"top-10-ways-machine-learning-may-help-devops","content:en-us:blog:top-10-ways-machine-learning-may-help-devops.yml","Top 10 Ways Machine Learning May Help Devops","en-us/blog/top-10-ways-machine-learning-may-help-devops.yml","en-us/blog/top-10-ways-machine-learning-may-help-devops",{"_path":3100,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3101,"content":3106,"config":3111,"_id":3113,"_type":14,"title":3114,"_source":16,"_file":3115,"_stem":3116,"_extension":19},"/en-us/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo",{"title":3102,"description":3103,"ogTitle":3102,"ogDescription":3103,"noIndex":6,"ogImage":929,"ogUrl":3104,"ogSiteName":706,"ogType":707,"canonicalUrls":3104,"schema":3105},"Top tips for efficient AI-powered Code Suggestions with GitLab Duo","Explore best practices  for using Code Suggestions and how to combine it with our other AI features to greatly improve the developer experience (includes real-world exercises).","https://about.gitlab.com/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top tips for efficient AI-powered Code Suggestions with GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2024-06-11\",\n      }",{"title":3102,"description":3103,"authors":3107,"heroImage":929,"date":3108,"body":3109,"category":10,"tags":3110},[1262],"2024-06-11","[GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI-powered features, provides a unique opportunity to make your DevSecOps workflows more efficient. To make the most of GitLab Duo requires hands-on practice and learning in public together. 
This tutorial centers on GitLab Duo Code Suggestions and provides tips and tricks, learned best practices, and some hidden gems (including how to pair Code Suggestions with our other AI features for even more efficiency). You'll also discover how AI greatly improves the developer experience.\n\nThe best practices, tips, and examples in this article have been created from scratch and are included in the [GitLab Duo documentation](https://docs.gitlab.com/ee/user/gitlab_duo/index.html) and [GitLab Duo prompts project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts), maintained by the GitLab Developer Relations team. Bookmark this page, and navigate into the respective chapters at your convenience.\n\nWhat you'll learn:\n\n1. [Why use GitLab Duo Code Suggestions?](#why-use-gitlab-duo-code-suggestions%3F)\n1. [Start simple, refine prompts](#start-simple-refine-prompts)\n1. [Practice, practice, practice](#practice-practice%2C-practice)\n    - [Fix missing dependencies](#fix-missing-dependencies)\n    - [Boilerplate code: Optimized logging](#boilerplate-code-optimized-logging)\n    - [Utility helper functions, well tested](#utility-helper-functions-well-tested)\n    - [Generate regular expressions](#generate-regular-expressions)\n1. [Re-trigger Code Suggestions](#re-trigger-code-suggestions)\n    - [Common keyboard combinations to re-trigger Code Suggestions](#common-keyboard-combinations-to-re-trigger-code-suggestions)\n    - [Stuck in the middle of suggestions](#stuck-in-the-middle-of-suggestions)\n    - [Code Suggestions stopped](#code-suggestions-stopped)\n1. 
[Code Suggestions vs, code generation](#code-suggestions-vs.-code-generation)\n    - [Start with a comment on top for code generation](#start-with-a-comment-on-top-for-code-generation)\n    - [Intent detection for code suggestions and generation](#intent-detection-for-code-suggestions-and-generation)\n    - [Tell a story for efficient code generation](#tell-a-story-for-efficient-code-generation)\n    - [Generate regular expressions](#generate-regular-expressions)\n    - [Iterate faster with code generation](#iterate-faster-with-code-generation)\n    - [Practical code generation: Cloud-native observability](#practical-code-generation-cloud-native-observability)\n1. [Take advantage of all GitLab Duo features](#take-advantage-of-all-gitlab-duo-features)\n    - [Combine Chat with Code Suggestions](#combine-chat-with-code-suggestions)\n    - [Use Chat to generate build configuration](#use-chat-to-generate-build-configuration)\n    - [Use Chat to explain potential vulnerabilities](#use-chat-to-explain-potential-vulnerabilities)\n    - [Combine vulnerability resolution with Code Suggestions](#combine-vulnerability-resolution-with-code-suggestions)\n1. [More tips](#more-tips)\n    - [Verify code quality and security](#verify-code-quality-and-security)\n    - [Learn as a team, and understand AI's impact](#learn-as-a-team-and-understand-ai-impact)    \n    - [Development is a marathon, not a sprint](#development-is-a-marathon-not-a-sprint)\n    - [Contribute using GitLab Duo](#contribute-using-gitlab-duo)\n1. [Share your feedback](#share-your-feedback)\n\n## Why use GitLab Duo Code Suggestions?\n\nConsider these two scenarios:\n\n1. As a senior developer, you have the confidence in your ability with various programming languages to write new source code, review existing code, design resilient architectures, and implement new projects. However, getting familiar with the latest programming language features requires time, research, and a change of habits. 
So how can you quickly learn about new language feature additions that could make your code even more robust or use resources more sustainably?\n\n    - As a personal example, I learned the C++03 standard, later C++11 and never really touched base on C++14/17/20/23 standards. Additionally, new languages such as [Rust](https://about.gitlab.com/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/) came around and offered better developer experiences. What now?\n\n2. As a new engineer, it can be challenging to navigate new projects, get familiar with a new programming language, understand specific algorithms, and find the documentation for structures, interfaces, and other technical components. New engineers are also learning under pressure, which often leads to errors and roadblocks along the way. There is no time for digging into best practices.\n\n    - I, myself, never really learned frontend engineering, just some self-taught HTML, CSS, and JavaScript. Adapting into frontend frameworks such as VueJS after a decade feels overwhelming, and I have little time to learn.\n\nThese scenarios show how hard it can be to keep up with the latest programming languages, best practices, and other key information. [GitLab Duo Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/), which predictively completes code blocks, defines function logic, generates tests, and proposes common code like regex patterns – all in your coding environment. Code Suggestions provides the AI assistance necessary to learn what you need to know while staying in your development flow.\n\n> Live demo! Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Register today](https://about.gitlab.com/seventeen/)!\n\n## Start simple, refine prompts\n\nMy own GitLab Duo adoption journey started with single-line code comments, leading to not-so-great results at first. 
\n\n```\n# Generate a webserver\n\n// Create a database backend\n\n/* Use multi-threaded data access here */\n```\n\nAfter experimenting with different contexts, and writing styles, I found that code generation out of refined comments worked better. \n\n```\n# Generate a webserver, using the Flask framework. Implement the / URL endpoint with example output.\n\n// Create a database backend. Abstract data handlers and SQL queries into function calls.\n\n/* Use multi-threaded data access here. Create a shared locked resource, and focus on supporting Linux pthreads. */\n```\n\nCode comments alone won't do the trick, though. Let's explore more best practices.\n\n## Practice, practice, practice \n\nFind use cases and challenges for your daily workflows, and exclusively use GitLab Duo. It can be tempting to open browser search tabs, but you can also solve the challenge in your IDE by using GitLab Duo. Here are some examples:\n\n1. Fix missing dependencies (which always cause build/execution failures).\n1. If you're missing logging context, let Code Suggestions auto-complete started function calls, including `print` statements.\n1. Generate common methods and attributes for object-oriented design patterns (e.g. getter/setter methods, `toString()` and object comparison operators, object inheritance, etc.).\n1. Identify the function that generates random crashes. Use Code Suggestions to implement a new function with a different algorithm.\n1. If you encounter application cannot be compiled or executed, cryptic error, ask GitLab Duo Chat about it.\n1. Learn about existing (legacy) code, and strategies to document and refactor code into modern libraries. Start a v2 of an application with a new framework or programming language, helping solve technical debt.\n1. Prevent operations and security issues in Git history by detecting them before they occur (e.g. 
performance, crashes, security vulnerabilities).\n\nThink of the most boring - or most hated - coding task, and add it to the list above. My least favorite tasks are attribute getter/setter methods in C++ classes (as can be seen in the video below), immediately followed by regular expressions for email address format.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Z9EJh0J9358?si=QGvQ6mXxPPz4WpM0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nIt can also help to use Code Suggestions in different programming languages, for example focusing on backend and frontend languages. If you are experienced in many languages, take a look into languages that you have not used in a while, or look into learning a new programming language such as [Python](https://about.gitlab.com/blog/learning-python-with-a-little-help-from-ai-code-suggestions/) or [Rust](https://about.gitlab.com/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/). \n\nWhen you adopt Code Suggestions into a fast auto-completion workflow, it can happen without any interruption. The suggested code is greyed out and optional, depending on the user interface – for example, VS Code. This means that it will not distract you from continuing to write source code. Try using Code Suggestions on your own by familiarizing yourself with how suggestions are shown, how you can fully or partially accept them, and soon they will become optional help to write better code. \n\n![Image with code suggestions greyed out](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_java_springboot_class_methods_tostring.png)\n\n### Fix missing dependencies\n\nAfter building or running source code, missing dependency errors might be logged and prevent further execution and testing. 
The following example in Go shows an error from `go build`, where the source code did not import any dependencies yet. A manual approach can be collecting all listed dependencies, running a unique sort on them, and adding them into the source code file, as shown below.\n\n![Go build failed - missing dependencies](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_build_failed_missing_deps.png)\n\nBut what if GitLab Duo knows about the file context and missing dependencies already? Navigate into the top section and add a comment, saying `// add missing imports` and wait for Code Suggestions.\n\n![GitLab Duo Code Suggestions - go build failed missing dependencies suggested fix](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_build_failed_missing_deps_suggested_fix.png)\n\nRunning `go build` again results in success, and the source code can be tested and run.\n\n![Go build failed - missing dependencies fixed](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_build_failed_missing_deps_fixed.png)\n\n### Boilerplate code: Optimized logging\n\n*Q: Logging – and more observability data with metrics and traces – can be hard and tedious to implement. What is the most efficient way to implement them that does not impact the application performance or cause bugs?*\n\n*A: Use Code Suggestions to generate logging function calls, and refactor the code into robust observability instrumentation library abstractions. 
This method can help to prepare the code for later integration with [OpenTelemetry](https://docs.gitlab.com/ee/development/stage_group_observability/gitlab_instrumentation_for_opentelemetry.html), for example.*\n\nExample for a logging class in Ruby:\n\n```ruby\n# Create Logging utility class\n# Define default log level as attribute\n# Add method for logging, inputs: level, app, message\n# Print the data with formatted date and time in syslog format\n\n# Potential AI-generated code below\nclass Logging\n  attr_accessor :log_level\n\n  def log(level, app, message)\n    time = Time.now.strftime(\"%b %d %H:%M:%S\")\n    puts \"#{time} #{app}[#{Process.pid}]: #{level} - #{message}\"\n  end\nend\n\n# Instantiate class and test the log method\n\n# Potential AI-generated code below\nlogger = Logging.new\nlogger.log_level = :debug\n\nlogger.log(:info, 'MyApp', 'Application started - info')\nlogger.log(:debug, 'MyApp', 'Application started - debug')\nlogger.log(:error, 'MyApp', 'Application started - error')\nlogger.log(:fatal, 'MyApp', 'Application started - fatal')\nlogger.log(:warn, 'MyApp', 'Application started - warn')\n```\n\n### Utility helper functions, well tested\n\n*Q: The programming language does not provide basic functions in the standard library. I'm tempted to open my browser to add string manipulation and regular expression parser functions.*\n\n*A: Create a new file called `utility.{language-suffix}` and add a code comment on top. 
Instruct Code Suggestions to generate a string manipulation function ().*\n\nExample for a string manipulation method in Python:\n\n```python\n# Create a function to search for a string in a string, return the index\n# Optionally remove search string, and return modified string\n# Test the function on the string \"Cool programming languages: C++, Rust, Python\" - Remove the string \"C++\"\n\n# Potential AI-generated code below\ndef search_and_remove(text, search):\n    index = text.find(search)\n    if index != -1:\n        return index, text[:index] + text[index+len(search):]\n    else:\n        return None, text\n\ntext = \"Cool programming languages: C++, Rust, Python\"\nindex, new_text = search_and_remove(text, \"C++\")\nprint(index, new_text)\n```\n\n**Async exercise:** Ask GitLab Duo Chat how to add tests in Python, select the source code, and use the `/tests` slash command. \n\nA similar example can be implemented in Go, creating utility functions for unique integer values in an array, or the sum of all even values. Start with defining the project structure through `go mod init gitlab.com/group/project` and create the `array.go` file. 
Define the `package` and start with the first code comment to generate the functions.\n\n```go\npackage array \n\n// Create a function that returns unique values from an integer array\n\n// Possible AI-generated code\nfunc unique(ints []int) []int {\n    occurred := map[int]bool{}\n    result := []int{}\n    for _, i := range ints {\n        if !occurred[i] {\n            occurred[i] = true\n            result = append(result, i)\n        }\n    }\n    return result\n}\n\n// Create a function that returns the sum of all even numbers in an integer array\n\n// Possible AI-generated code\nfunc sumEvens(ints []int) int {\n    var sum int\n    for _, i := range ints {\n        if i%2 == 0 {\n            sum += i\n        }\n    }\n    return sum\n}\n```\n\n**Async exercise**: Create more utility helper functions in dedicated libraries, and use Chat to select and generate `/tests`. For the Go example, you can inspect potential solutions in the `go/utility/array_test.go` file in the [GitLab Duo Prompts project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts). Build and test the code using `go build && go test`.\n\n### Generate regular expressions\n\nDevelopers' favorite one liners, never touched again. `git blame` knows very well but might not be able to provide enough context. GitLab Duo can help with regular expressions creation, explanation, and refactoring, in the following example:\n\n*Q: My regular expressions for parsing IPv6 and IPv4 addresses do not work. What's the best approach to solve this?*\n\n*A: Use Code Suggestions comments to generate examples using these regex types. Combine the questions with Chat, and ask for more examples in different languages. You can also select the existing source, and use a refined prompt with `/refactor using regular expressions` in the Chat prompt.*\n\n**Async exercise**: Choose your favorite language, create a function stub that checks IPv6 and IPv4 address strings for their valid format. 
Trigger Code Suggestions to generate a parsing regular expression code for you. Optionally, ask Chat how to refine and refactor the regex for greater performance.\n\nI chose TypeScript, a language on my personal learning list for 2024: `// Generate a TypeScript function which parses IPv6 and IPv4 address formats. Use regular expressions`.\n\n![Code Suggestions - typescript utility parse ip address regex](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_typescript_utility_parse_ip_address_regex.png)\n\n![Code Suggestions typescript - utility parse ip address regex tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_typescript_utility_parse_ip_address_regex_tests.png)\n\n## Re-trigger Code Suggestions\n\nYou can trigger Code Suggestions by pressing the `enter` or `space` key, depending on the context. In VS Code and the GitLab Web IDE, the GitLab Duo icon will appear in the same line, and at the bottom of the window.\n\nIf you accepted a suggestion, but actually want to try a different suggestion path, select the code, delete the line(s) and start over.\n\n> **Tip:** Different keystrokes and strategies for Code Suggestions are recorded in this video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ORpRqp-A9hQ?si=CmA7PBJ9ckWsvjO3\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Common keyboard combinations to re-trigger Code Suggestions\n\nEspecially in the early adoption phase of Code Suggestions, you'll need to practice to get the best results from comments, existing code style, etc., put into context.\n\nA common keystroke pattern for triggering suggestions can be\n\n1. Press `Enter` and wait for the suggestion.\n1. Press `Space` followed by `Backspace` to immediately delete the whitespace again, or\n1. 
Press `Enter` to re-trigger the suggestion. `Backspace` to delete any leftover new lines.\n\nWhen a suggestion makes sense, or you want to see how far you can get:\n\n1. Continue pressing `Tab` to accept the suggestion.\n1. Add a space or press `Enter` to open a new scope for triggering a new suggestion.\n1. Continue accepting suggestions with `Tab`. \n\nNote that generative AI sometimes ends up in a loop of suggesting similar code paths over and over again. You can trigger this behavior by inserting test data into an array, using strings and numbers in a sorted order or by generating different API endpoints, as it tries to guess which other endpoints could be helpful. When this happens, break the acceptance flow, and continue writing code as normal.\n\n### Stuck in the middle of suggestions\n\nSometimes, the code suggestions may stop in the middle of a variable, function, etc. definition. If you are unsure about the syntax, or want to restart the code suggestions:\n\n1. Delete the last character(s) or the entire line, using `Backspace`.\n1. Alternatively, use `shift cursor left` (select characters) or `cmd shift cursor left` (select entire line), followed by `Backspace`.\n1. Move the cursor into the line above, and press `Enter` to force a Code Suggestions trigger again.\n\n### Code Suggestions stopped\n\nWhen Code Suggestions stops, there can be multiple reasons:\n\n1. The current file scope ends – for example, a `main()` function has been generated and closed.\n1. There could be connection problems to the GitLab instance (self-managed) or GitLab.com (SaaS, [Dedicated](https://about.gitlab.com/dedicated/)). Follow the [troubleshooting documentation](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/troubleshooting.html).\n\n## Code suggestions vs. code generation\n\nCode suggestions \"come as you go\" while writing code, and help with completing line(s). 
Code generation on the other hand requires more context to create entire code blocks, consisting of functions, algorithms, classes, etc. \n\nThe following sections discuss both methods, and how to get started with a practical example. \n\n### Code suggestions flow with comments\n\nUse your natural programming flow, and stop to adapt to adding code comments when helpful for context and better suggestions. You can accept code suggestions using the `Tab` key, or selectively accept words using the `cmd cursor right` keyboard shortcut.\n\nThe following new challenge implements a simple Linux statistics tool in C, mimicking the functionality of `iostat`, `vmstat` and `du` CLI commands on Linux. Sometimes, these low-level metrics come in handy for presenting application metrics, or otherwise help with debugging when requesting support data from customers.\n\nCreate a new application directory called `linux_stat` and `main.c` file, and let Code Suggestions complete everything else. First, start with a simple code comment block:\n\n```c\n// Create a Linux statistic tool\n// Collect IO, performance, disk usage, TCP latency\n// Print summaries\n```\n\nNext, press `Enter` to trigger the creation of the `main()` method. If not automatically created, force it through a new code comment.\n\n```c\n// Create main function to collect data\n```\n\nIn my attempts, generative AI did not immediately implement all functionality. 
Instead, it added a `TODO` comment into the following code block.\n\n```c\nint main() {\n    struct stat statbuf;\n\n    if (stat(\"/etc\", &statbuf) != 0) {\n        perror(\"stat\");\n        return 1;\n    }\n\n    printf(\"IO Block Size: %ld\\n\", statbuf.st_blksize);\n\n    // TODO: add CPU usage, disk usage, network latency measurements\n\n    return 0;\n}\n```\n\nMove the cursor after the `TODO` comment, and continue with a new comment to collect CPU usage:\n\n```c\n    // Collect CPU usage\n\n    // Example for AI-generated code below\n    double cpu_usage;\n\n    FILE* stat_file = fopen(\"/proc/stat\", \"r\");\n    if (stat_file == NULL) {\n        perror(\"fopen\");\n        return 1;\n    }\n\n    // Parse /proc/stat to get CPU usage\n    fscanf(stat_file, \"cpu %lf\", &cpu_usage);\n\n    printf(\"CPU Usage: %.2f%%\\n\", cpu_usage * 100);\n\n    fclose(stat_file);\n```\n\nThe generated code might not be 100% accurate, since measuring CPU usage on Linux requires taking samples over a defined time range. Thus, the caller needs to calculate the real CPU utilization from all values in `/proc/stat`, including [`system, nice, user, idle, iowait`, etc.](https://www.kernel.org/doc/Documentation/filesystems/proc.txt).\n\n**Async challenge:** Optimize the collection process with average calculation.\n\nRepeat the steps for disk usage and network latency. The code comment is written by you, the C source code is generated with GitLab Duo.\n\n```c\n    // Collect disk usage\n\n    // Collect network latency\n\n    // Example for AI-generated code below\n    int sockfd = socket(AF_INET, SOCK_STREAM, 0);\n\n    struct sockaddr_in servaddr;\n    servaddr.sin_family = AF_INET;\n    servaddr.sin_port = htons(80);\n    inet_pton(AF_INET, \"8.8.8.8\", &servaddr.sin_addr);\n```\n\nThe generated code may use public IP addresses to test network latency. Change the IPv4 address to GitLab.com's current address. 
\n\n```c\n    inet_pton(AF_INET, \"172.65.251.78\", &servaddr.sin_addr);\n```\n\nThe generated source code added more header dependencies. Navigate into the top section of the file, and add a comment to include all required headers:\n\n```c\n// Import required headers\n```\n\nBuild and run the application on Linux, example on Ubuntu 22 LTS:\n\n```shell\n# Install compiler\napt install gcc\n\n# Build\ngcc main.c -o linux_stat\n\n# Build: If the math.h header is included, linking against the library is needed, too\ngcc linux_stat.c -o linux_stat -lm\n\n# Run the tool\n./linux_stat\n```\n\n**Bonus:** Change the code to use IPv6, and check the GitLab.com IPv6 address again (`dig gitlab.com AAAA +short`).\n\n```c\n    // Collect network latency\n\n    // Use IPv6 \n\n    // Example for AI-generated code below\n    struct sockaddr_in6 servaddr;\n    servaddr.sin6_family = AF_INET6;\n    servaddr.sin6_port = htons(80);\n    //inet_pton(AF_INET6, \"2001:db8::1\", &servaddr.sin6_addr);\n    inet_pton(AF_INET6, \"2606:4700:90:0:f22e:fbec:5bed:a9b9\", &servaddr.sin6_addr);\n\n    int sockfd = socket(AF_INET6, SOCK_STREAM, 0);\n```\n\n![C Linux stat tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_c_linux_stat_tests.png)\n\nThe full working source code is available in the [GitLab Duo Prompts project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts) in the directory for C code.\n\n**Async exercise:** Refactor the C code into Rust, using only GitLab Duo. Start by selecting the source code, and use the Duo Chat prompt `/refactor into Rust`. \n\n> **Tip:** Thoughtful code comments make the source code more readable, too. 
This helps new team members with onboarding, site reliability engineers with debugging production incidents, and open source contributors with [merging their first MRs](https://handbook.gitlab.com/handbook/marketing/developer-relations/contributor-success/community-contributors-workflows/#first-time-contributors).\n\n### Start with a comment on top for code generation\n\nSource code can be organized in multiple files. Whether you start with a new application architecture, or refactor existing source code, you can take advantage of code generation with GitLab Duo.\n\nStart with a comment block on top, and make it a step-by-step description. You can also break longer comments into multiple lines, revisiting the examples in this article. This pattern also helps to think about the requirements, and can help refining the prompts. \n\n```diff\n# Generate a webserver, using the Flask framework. \n# Implement the / URL endpoint with example output.\n+# Add an endpoint for Promtheus metrics\n\n// Create a database backend. \n// Abstract data handlers and SQL queries into function calls.\n+// Use PostgreSQL as default backend, and SQLite for developers as fallback.\n\n/* \nUse multi-threaded data access here.\nCreate a shared locked resource, and focus on supporting Linux pthreads. 
\n+Abstract the thread creation/wait procedures into object-oriented classes and methods.\n*/\n```\n\nMore code generation prompts for [supported programming languages](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html) are available in the [GitLab Duo Use Cases documentation](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html#code-generation-prompts).\n\n### Intent detection for code suggestions and generation\n\nCode Suggestions, depending on the GitLab Language Server in your IDE, will parse and detect the intent and offer code completion suggestions in the same line or code generation.\n\nThe technology in the background uses TreeSitter to parse the code into an [AST](https://en.wikipedia.org/wiki/Abstract_syntax_tree), and determine whether the scope is inside a code comment block (generation), or inside the source code (completion). This detection needs to be executed fast on the client IDE, and proves to be a great use case for [WebAssembly](https://webassembly.org/). You can learn more in [this epic](https://gitlab.com/groups/gitlab-org/-/epics/11568), and the following video, which provides a look into the GitLab Language Server powering Code Suggestions:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/VQlWz6GZhrs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Tell a story for efficient code generation\n\nCode generation is art. Tell the story, and AI-powered GitLab Duo can assist you. \n\nThe following example aims to implement an in-memory key-value store in Go, similar to Redis. 
Start with a description comment, and trigger Code Suggestions by continuing with a new line and pressing `Enter`.\n\n```golang\n// Create an in-memory key value store, similar to Redis \n// Provide methods to\n// set/unset keys\n// update values\n// list/print with filters\n```\n\nWe can be more specific – which methods are required for data manipulation? Instruct Code Suggestions to generate methods for setting keys, updating values, and listing all contained data.\n\n```golang\n// Create an in-memory key value store, similar to Redis \n// Provide methods to\n// set/unset keys\n// update values\n// list/print with filters\n```\n\nAccept all suggestions using the `Tab` key. As a next step, instruct Code Suggestions to create a `main` function with test code.\n\n```golang\n// Create a main function and show how the code works\n```\n\nIf the test data is not enough, refine the generated code with a focus on extreme test cases.\n\n> **Tip:** You can use the same method for refined [Chat prompts and test generation](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#write-tests-in-the-ide), `/tests focus on extreme test cases`.\n\n```golang\n// Add more random test data, focus on extreme test cases\n```\n\n![Code Suggestions - go kv more test data](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_kv_more_test_data.png)\n\nThe full example, including fixed dependencies, is located in the [gitlab-duo-prompts project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts) in the `code-suggestions/go/key-value-store` directory. Update the `main.go` file, and build and run the code using the following command: \n\n```shell\ngo build\n./key-value-store\n```\n\nThe first iteration was to create a standalone binary and test different implementation strategies for key-value stores. 
Commit the working code and continue with your GitLab Duo adoption journey in the next step.\n\n> **Tip:** New projects can benefit from Code Generation, and require practice and more advanced techniques to use code comments for prompt engineering. This method can also make experienced development workflows more efficient. Proof of concepts, new library introductions, or otherwise fresh iterations might not always be possible in the existing project and framework. Experienced developers seek to create temporary projects, and isolate or scope down the functionality. For example, introducing a database backend layer, and benchmarking it for production performance. Or, a library causing security vulnerabilities or license incompatibilities should be replaced with a different library, or embedded code functionality.\n\n### Iterate faster with code generation\n\nExperienced developers will say, \"There must be a key-value library in Go, let us not reinvent the wheel.\" Fortunately, Go is a mature language with a rich ecosystem, and awesome-go collection projects, for example [avelino/awesome-go](https://github.com/avelino/awesome-go), provide plenty of example libraries. 
Note: This possibility might not be the case for other programming languages, and requires a case-by-case review.\n\nWe can also ask GitLab Duo Chat first, `Which Go libraries can I use for key-value storage?`:\n\n![Chat - ask golang libraries kv](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_chat_ask_golang_libs_kv.png)\n\nAnd then refine the Code Suggestions prompt to specifically use the suggested libraries, for example, BoltDB.\n\n```diff\n// Create an in-memory key value store, similar to Redis \n// Provide methods to\n// set/unset keys\n// update values\n// list/print with filters\n+// Use BoltDB as external library\n```\n\nRepeat the pattern from above: Generate the source code functions, then ask GitLab Duo to create a main function with test data, and build the code. The main difference is external libraries, which need to be pulled with the `go get` command first. \n\n```shell\ngo get\ngo build\n```\n\nIf the source code build fails with missing dependencies such as `fmt`, practice using GitLab Duo again: Move the cursor into the `import` statement, and wait for the suggestion to add the missing dependencies. Alternatively, add a comment saying `Import all libraries`.\n\n![Code Suggestions - go kv external lib boltdb fix dependencies](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_kv_external_lib_boltdb_fix_deps.png)\n\nYou can also add more test data again, and verify how the functions behave: `// Add more random test data, focus on extreme test cases`. 
In the following example, an empty key causes the program to panic.\n\n![Code Suggestions - Go kv external lib boltdb test extreme cases panic](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_kv_external_lib_boltdb_test_extreme_cases_panic.png)\n\nThis example is a great preparation for test cases later on.\n\n### Practical code generation: Cloud-native observability\n\nThink of a client application in Go, which lists the current state of containers, pods, and services in a Kubernetes cluster, similar to the `kubectl get pods` command line. The Kubernetes project provides [Go libraries](https://pkg.go.dev/k8s.io/client-go/kubernetes) to programmatically interact with the Kubernetes APIs, interfaces, and object structures.\n\nOpen your IDE, and create a new Go project.\n\n> **Tip:** You can ask Chat how to do it - `How to start a Go project? Please show CLI command examples`. \n\nStart with a single comment on top of the `main.go` file, and describe the application purpose: Observability in Kubernetes.\n\n```golang\n// Create a client for Kubernetes observability\n```\n\nThink about the main requirements: Get access to Kubernetes, create context, namespace, and inspect the state. 
Additionally, instruct Code Suggestions to import packages and create a main package in the `main.go` file.\n\nFirst iteration:\n\n```golang\n// Create a client for Kubernetes observability\n// Inspect container, pod, service status and print an overview\n```\n\nThis might do unexpected things with hardcoding the access credentials, missing contexts, failing builds.\n\nSecond iteration:\n\n```golang\n// Create a client for Kubernetes observability\n// Create a function that\n// Read the kubernetes configuration file from the KUBECONFIG env var\n// Inspect container, pod, service status and print an overview\n```\n\nThis might not know about Kubernetes contexts and namespaces, thus leading to build errors or unexpected results.\n\nThird iteration:\n\n```golang\n// Create a client for Kubernetes observability\n// Create a function that\n// Read the kubernetes configuration file from the KUBECONFIG env var\n// Create kubernetes context, namespace default\n// Inspect container, pod, service status and print an overview\n```\n\nThis example hardcodes the Kubernetes context and default namespace to generate an initial foundation. Later iterations can read the namespace value from a command line parameter, or configuration file.\n\nThe final example can look like the following. 
In addition to the application functionality, it also instructs Code Suggestions to import all dependencies, and create a `main` package in `main.go`.\n\n```golang\n// Create a client for Kubernetes observability\n// Create a function that\n// Read the kubernetes configuration file from the KUBECONFIG env var\n// Create kubernetes context, namespace default\n// Inspect container, pod, service status and print an overview\n// Import necessary packages\n// Create main package\n```\n\n\u003Cdetails> Solution \n\u003Csummary>\n\n```golang\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/tools/clientcmd\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nfunc main() {\n\tkubeconfig := os.Getenv(\"KUBECONFIG\")\n\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", kubeconfig)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tpods, err := clientset.CoreV1().Pods(\"\").List(context.TODO(), metav1.ListOptions{})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Printf(\"There are %d pods in the cluster\\n\", len(pods.Items))\n\n\t// Additional code to inspect services, containers, etc\n}\n```\n\n\u003C/summary>\n\u003C/details>\n\nExample output:\n\n![duo code suggestions - go k8s o11y output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_k8s_o11y_output.png)\n\n**Async exercise:** Complete the project with code for inspecting services, containers, etc., and export the findings to [OpenTelemetry](https://opentelemetry.io/).\n\n> **Tip:** Practice with the [GitLab Duo use cases: Code generation prompts](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html#code-generation-prompts) in the documentation, and/or send merge requests with your working prompts.\n\nWhile recording a short video to highlight how code generation is working, another 
more refined source code was generated. You can inspect the differences in [this commit](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts/-/commit/a1a46de9789d4791f04b4df9f1a35d05b8e67568), and benefit from both solutions.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ORpRqp-A9hQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Take advantage of all GitLab Duo features \n\n### Combine Chat with Code Suggestions\n\nIn combination with [GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat/index.html), Code Suggestions becomes even more powerful. The following workflow illustrates the intersection of AI efficiency:\n\nWrite and generate new code using Code Suggestions. The source code will be verified through CI/CD automation, code quality tests, and security scanning. But what about the developer's knowledge?\n\n1. In your IDE, select the generated code portions and use the [`/explain` slash command](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#explain-code-in-the-ide) in the Chat prompt. You can even refine the prompt to `/explain with focus on algorithms`, or otherwise helpful scopes such as potential security or performance problems, etc.\n\n    - Continue writing and maintaining source code, but at some point code quality decreases and refactoring gets challenging. Ask GitLab Duo Chat for help.\n\n2. In your IDE, select the source code, and use the [`/refactor` slash command](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#refactor-code-in-the-ide) in the Chat prompt. You can refine the prompt to focus on specific design patterns (functions, object-oriented classes, etc.), `/refactor into testable functions` for example.\n\n    - After ensuring more readable code, tests need to be written. What are potential extreme cases, or random data examples for unit tests? 
Research and implementation in various frameworks can take time.\n\n3. In your IDE, select the source code, and use the [`/tests` slash command](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#write-tests-in-the-ide) in the Chat prompt. You can also refine the prompt to focus on specific test frameworks, scenarios, input methods, etc. \n\n    - Code quality and test coverage reports are green again. Focus on efficient DevSecOps workflows with Code Suggestions again. \n\nMore scenarios are described in the [GitLab Duo use cases documentation](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html).\n\n### Use Chat to generate build configuration\n\nThe time-intensive research on getting started with a new project can be exhausting. Especially with different paths to do it right, or alternative frameworks, this can lead to more work than anticipated. Newer programming languages like Rust propose one way (Cargo), while Java, C++, etc. offer multiple ways and additional configuration languages on top (Kotlin DSL, CMake DSL, etc.).\n\nTake advantage of asking GitLab Duo how to start a project, generate specific configuration examples for build tools (e.g. `Please show a gradle.build example for Spring Boot`), and reduce the time to start developing, building, and testing source code.\n\n1. Java, Gradle, Spring Boot: `Please show a gradle.build example for Spring Boot`\n1. C++, CMake, clang: `Please show a basic CMake configuration file for C++17, using clang as compiler.`\n1. Python: `Please show how to initialize and configure a Python project on the CLI`\n1. Rust: `Please show how to initialize and configure a Rust project.`, followed by a refinement question: `Explain the structure of Cargo.toml`.\n1. Go: `Please show how to initialize and configure a Go project`. \n\n### Use Chat to explain potential vulnerabilities\n\nLet us assume that some PHP code was generated to create a web form. 
The code might be vulnerable to security issues.\n\n```php\n\u003C?php \n// Create a feedback form for user name, email, and comments\n// Render a HTML form\n\n$name = $_POST['name'];\n$email = $_POST['email'];\n$comments = $_POST['comments'];\n\necho '\u003Cform method=\"post\">';\necho '\u003Clabel for=\"name\">Name:\u003C/label>';\necho '\u003Cinput type=\"text\" id=\"name\" name=\"name\">';\n\necho '\u003Clabel for=\"email\">Email:\u003C/label>';\necho '\u003Cinput type=\"email\" id=\"email\" name=\"email\">';\n\necho '\u003Clabel for=\"comments\">Comments:\u003C/label>';\necho '\u003Ctextarea id=\"comments\" name=\"comments\">\u003C/textarea>';\n\necho '\u003Cinput type=\"submit\" value=\"Submit\">'; \necho '\u003C/form>';\n\n?>\n```\n\nSelect the source code, and [ask Chat to explain](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#explain-code-in-the-ide), using a refined prompt with `/explain why this code is vulnerable to bad security actors`. \n\n![Code Suggestions - Chat explains potential vulnerability](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_chat_explain_potential_vulnerability.png)\n\n> **Tip**: We are investigating and learning in the local developer environment. The vulnerable source code can be fixed before it reaches a Git push and merge request that trigger security scanning, which will unveil and track the problems, too. Learning about security vulnerabilities helps improve the developer experience.\n\n### Combine vulnerability resolution with Code Suggestions\n\nLet's look into another example with an intentional [vulnerability resolution challenge](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/vulnerability-resolution/challenge-resolve-vulnerabilities), and see if we can use Code Suggestions in combination with vulnerability resolution. 
The linked project has been preconfigured with static application security testing (SAST) scanning. You can follow these steps to configure GitLab SAST by using the [SAST CI/CD component](https://gitlab.com/explore/catalog/components/sast) in the `.gitlab-ci.yml` CI/CD configuration file.\n\n```yaml\ninclude:\n  # Security: SAST (for vulnerability resolution)\n  - component: gitlab.com/components/sast/sast@1.1.0\n```\n\nAfter inspecting the vulnerability dashboard and details, you can use [vulnerability explanation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#vulnerability-explanation) to better understand the context and potential problems. [Vulnerability resolution](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#vulnerability-resolution) creates a merge request with a proposed source code fix for a detected security vulnerability. \n\nSometimes, it can be necessary to refine the suggested code. Navigate into the [created MR](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/vulnerability-resolution/challenge-resolve-vulnerabilities/-/merge_requests/1), and either copy the Git branch path for local Git fetch, or open the Web IDE from the `Edit` button to continue in the browser. 
Navigate into the source code sections with the fixed code portions, and modify the code with a comment:\n\n```\n// refactor using safe buffers, null byte termination\n```\n\n![duo code suggestions - with vulnerability resolution proposal](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_with_vulnerability_resolution_proposal.png)\n\nAlternatively, you can also open Chat, select the source code and use the `/refactor` slash command.\n\n![duo code suggestions - with vulnerability resolution add duo chat refactor](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_with_vulnerability_resolution_add_duo_chat_refactor.png)\n\nA full example is available in the [GitLab Duo use cases documentation](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html#explain-and-resolve-vulnerabilities). \n\nHere is a recording of that example:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Ypwx4lFnHP0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## More tips \n\n### Verify code quality and security\n\nMore generated code requires quality assurance, testing, and security measures. Benefit from all features on a DevSecOps platform:\n\n1. [CI/CD components](https://docs.gitlab.com/ee/ci/components/) and [pipeline efficiency](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html)\n1. [Code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html)\n1. [Code test coverage](https://docs.gitlab.com/ee/ci/testing/code_coverage.html)\n1. [Application security](https://docs.gitlab.com/ee/user/application_security/)\n1. 
[Observability](https://docs.gitlab.com/ee/operations/tracing.html)\n\n### Learn as a team, and understand AI impact\n\nAdapt and explore with dedicated team collaboration sessions, and record them for other teams to benefit from later. You can also follow the [GitLab Duo Coffee Chat playlist on YouTube](https://www.youtube.com/playlist?list=PL05JrBw4t0Kp5uj_JgQiSvHw1jQu0mSVZ).\n\nRead about AI impact metrics, including [How to put generative AI to work in your DevSecOps environment](https://about.gitlab.com/the-source/ai/how-to-put-generative-ai-to-work-in-your-devsecops-environment/) and the [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/). Visit the [AI Transparency Center](https://about.gitlab.com/ai-transparency-center/) to learn more about data usage, transparency, and AI ethics at GitLab.\n\n### Development is a marathon, not a sprint\n\nSometimes, code suggestions might take longer to load, compared to local auto-completion features. Take this time as an advantage, and think about the current algorithm or problem you are trying to solve. Often, a secondary thought can lead to more refined ideas. Or you can take a short break to take a sip from your preferred drink, and continue refreshed when the suggestions arrive.\n\nSome algorithms are super complex, or require code dependencies which cannot be resolved through auto-completion help. Proprietary and confidential code may provide less context to the large language models, and, therefore, require more context in the comments for Code Suggestions. Follow your own pace and strategy, and leverage Code Suggestions in situations where they help with boilerplate code, or helper functions. 
\n\n> **Tip:** Explore [Repository X-Ray](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/repository_xray.html) for more Code Suggestions context, and test experimental features, for example, [support for more languages in VS Code](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html#add-support-for-more-languages-for-code-suggestions-in-vs-code). More insights can be found in the epic to [improve acceptance rate for Code Suggestions](https://gitlab.com/groups/gitlab-org/-/epics/13085).\n\n### Contribute using GitLab Duo\n\nYou can use GitLab Duo to contribute to open source projects, using Code Suggestions, code refactoring, documentation through explanations, or test generation.\n\nGitLab customers can [co-create GitLab using GitLab Duo](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html#use-gitlab-duo-to-contribute-to-gitlab), too. Follow the updated guidelines for [AI-generated contributions](https://about.gitlab.com/community/contribute/dco-cla/#ai-generated-contributions), and watch an example recording from the GitLab Duo Coffee Chat: Contribute to GitLab using Code Suggestions and Chat:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/TauP7soXj-E\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Share your feedback\n\nGitLab Duo Code Suggestions enables more efficient development workflows. It requires hands-on practice and exercise through tutorials, team workshops, and guided training. Automated workflows with code quality, security scanning, and observability help tackle challenges with newly introduced source code at a much higher frequency. 
Taking advantage of all GitLab Duo features, including Chat, greatly improves the developer experience on the most comprehensive AI-powered DevSecOps platform.\n\nUse the best practices in this tutorial to kickstart your journey, follow the [GitLab Duo documentation](https://docs.gitlab.com/ee/user/gitlab_duo/index.html), and [ask our teams for GitLab Duo AI workshops](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/) (I have already shadowed customer workshops, they are great!). Please share your Code Suggestions feedback in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/435783), including screenshots and videos (when possible).\n\n> [Try GitLab Duo for free today!](https://about.gitlab.com/gitlab-duo/#free-trial)",[9,717,737],{"slug":3112,"featured":91,"template":693},"top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo","content:en-us:blog:top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo.yml","Top Tips For Efficient Ai Powered Code Suggestions With Gitlab Duo","en-us/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo.yml","en-us/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo",{"_path":3118,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3119,"content":3124,"config":3130,"_id":3132,"_type":14,"title":3133,"_source":16,"_file":3134,"_stem":3135,"_extension":19},"/en-us/blog/track-machine-learning-model-experiments",{"title":3120,"description":3121,"ogTitle":3120,"ogDescription":3121,"noIndex":6,"ogImage":868,"ogUrl":3122,"ogSiteName":706,"ogType":707,"canonicalUrls":3122,"schema":3123},"Track ML model experiments with new GitLab MLFlow integration","Track the many versions of your machine learning models on GitLab using the MLFlow client.","https://about.gitlab.com/blog/track-machine-learning-model-experiments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Track ML model 
experiments with new GitLab MLFlow integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eduardo Bonet\"}],\n        \"datePublished\": \"2023-05-11\"\n      }",{"title":3120,"description":3121,"authors":3125,"heroImage":868,"date":3127,"body":3128,"category":10,"tags":3129},[3126],"Eduardo Bonet","2023-05-11","\n\n\u003Ci>This blog is the latest post in an ongoing series about GitLab’s journey to \u003Ca href=\"/blog/ai-ml-in-devsecops-series/\">build and integrate AI/ML into our DevSecOps platform\u003C/a>. The first blog post can be found \u003Ca href=\"/blog/what-the-ml-ai/\">here\u003C/a>. Throughout the series, we’ll feature blogs from our product, engineering, and UX teams to showcase how we’re infusing AI/ML into GitLab.\u003C/i>\n\nThe GitLab DevSecOps platform now features [Machine Learning Model Experiments](https://docs.gitlab.com/ee/user/project/ml/experiment_tracking/), which is available to all GitLab users, making GitLab a powerful tool for creating ML models. Organizations can now track the many versions of their ML models within the GitLab user interface, using the open source [MLFlow](https://github.com/mlflow/mlflow).\n\n\u003Cimg src=\"/images/blogimages/2023-05-11-gitlab-model-experiments/experiment.png\" alt=\"Model experiment\" style=\"border: 1px solid gray;\">\n\n## What is an ML model?\n\nAn ML model is the result of three components: code to extract the patterns from the data, the data where the \npatterns are extracted from, and the configuration used for both, often called \"hyperparameters\". Any change to any of these components can \nlead to changes in the model performance, and keeping track of all of these parts and the results can be challenging. 
\nExperiment tracking aims to make sense of this confusion by keeping a record of all of the variations created, \nalong with the artifacts and results of each trial.\n\n[MLFlow](https://github.com/mlflow/mlflow) is a popular open source solution for ML experiment tracking, \nproviding a client to log different model versions and their metadata. However, it puts the cost of deployment and managing \nits server onto the users.\n\nGitLab makes the tracking process easier not by deploying a managed MLFlow backend, but by \u003Ci>being an MLFlow backend itself\u003C/i>. This marries the best of both worlds: Data scientists don't need to learn yet another client as their code requires minimal to no changes, while GitLab provides everything else. There is no need to manage a server or to implement user management, so there is no need to configure your artifact storage –  this is all provided by the GitLab DevSecOps platform.\n\n## ML model experiment features in GitLab 16.0\n\nWatch this overview of the available features in 16.0:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uxweU4zT40c\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n- **Create experiments and candidates using the MLFlow client**: Simply point the MLFlow client to your GitLab project and experiments and runs will be recorded on GitLab, with no additional setup necessary and no need to create a server. Note that MLFlow runs are called \"candidates\" in GitLab, as each of them is a candidate to become a version of a model.\n\n- **User access management**: Experiments are tied to a GitLab project, making it easy to control which users have access to which models. 
\n\n- **Manage candidates directly on the GitLab UI**: Search and explore your logged experiments on GitLab, using the UI you already know.\n\n- **Download candidate data as a CSV**: Data scientists that want to explore or create reports on an experiment can download the necessary data as a CSV file.\n\nTo get started, refer to the [documentation](https://docs.gitlab.com/ee/user/project/ml/experiment_tracking/#machine-learning-model-experiments).\n\n### More to come\n\nGitLab wants to help you manage the entire lifecycle of your machine learning model from creation to packaging, deployment, and monitoring. \nFor more information on what we are working on, keep an eye on the MLOps Incubation Engineering [handbook page](/handbook/engineering/incubation/mlops/) and on our [YouTube playlist](https://www.youtube.com/playlist?list=PL05JrBw4t0KpC6-JQy8lY4tNAZKXBaM_-).\n\nMachine Learning Model Experiments is an experimental feature available to all GitLab tiers, and we are looking for feedback so please [comment in this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/381660).\n\nContinue reading our \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\" series.\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[9,496,233,717],{"slug":3131,"featured":6,"template":693},"track-machine-learning-model-experiments","content:en-us:blog:track-machine-learning-model-experiments.yml","Track Machine Learning Model Experiments","en-us/blog/track-machine-learning-model-experiments.yml","en-us/blog/track-machine-learning-model-experiments",{"_path":3137,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3138,"content":3143,"config":3148,"_id":3150,"_type":14,"title":3151,"_source":16,"_file":3152,"_stem":3153,"_extension":19},"/en-us/blog/training-and-deploying-ai-models-with-gitlab-and-vertex-ai",{"title":3139,"description":3140,"ogTitle":3139,"ogDescription":3140,"noIndex":6,"ogImage":868,"ogUrl":3141,"ogSiteName":706,"ogType":707,"canonicalUrls":3141,"schema":3142},"Train and deploy AI models with GitLab and Google Cloud's Vertex AI","Demo of GitLab's DevSecOps capabilities combined with Vertex AI's scalable ML platform, designed with the aim of rapid and secure AI deployments.","https://about.gitlab.com/blog/training-and-deploying-ai-models-with-gitlab-and-vertex-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Train and deploy AI models with GitLab and Google Cloud's Vertex AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"}],\n        \"datePublished\": \"2023-06-08\",\n      }",{"title":3139,"description":3140,"authors":3144,"heroImage":868,"date":3145,"body":3146,"category":10,"tags":3147},[1510],"2023-06-08","\n\u003Ci>This blog is the latest post in an ongoing series about GitLab's journey to \u003Ca href=\"/blog/ai-ml-in-devsecops-series/\">build and integrate AI/ML into our DevSecOps platform\u003C/a>. The first blog post can be found \u003Ca href=\"/blog/what-the-ml-ai/\">here\u003C/a>. 
Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nMost development and engineering teams are now tasked with maintaining and deploying AI/ML-related code. In this context, the focus on security and efficiency becomes even more crucial. Companies are keen to capitalize on the benefits of AI swiftly while striving to decrease potential risks. GitLab can be used to orchestrate any AI/ML workloads, enabling teams to rapidly develop new generative AI capabilities. [GitLab recently announced a partnership with Google](https://about.gitlab.com/press/releases/2023-05-02-gitLab-and-google-cloud-partner-to-expand-ai-assisted-capabilities.html) to bring [generative AI on Google Cloud](https://cloud.google.com/ai/generative-ai) to our mutual customers.\n\nThis is a tutorial of how to use these tools to deploy an AI model with [Google Cloud's Vertex AI](https://cloud.google.com/vertex-ai) using GitLab to orchestrate the [ModelOps workload](https://about.gitlab.com/direction/modelops/). Our custom model training use case is simple introductory credit card fraud detection, a pertinent issue in the financial industry.\n\n## The solution\nOur solution is a Python-based credit card transaction fraud detection app. Once deployed, applications can use an API endpoint to make predictions on whether a submitted transaction is fraudulent or not.\n\n[Vertex AI](https://cloud.google.com/vertex-ai/docs) is Google Cloud's flagship AI/ML platform that lets users train and deploy machine learning models and AI applications. This is the platform where the API endpoint and model are hosted. 
While Vertex AI can handle training using prebuilt functions, this demo uses a custom training script written in Python.\n\nFor the demo’s purposes, GitLab hosts the [application source code](https://gitlab.com/gitlab-com/alliances/google/sandbox-projects/demos/vertex-ai) and helps to ensure quality and security by running the tests and scans automatically. We also use GitLab's CI/CD to execute the Python code, programmatically upload the resulting artifacts to Google Cloud Storage, and create the endpoint in Vertex AI.\n\nLet's take a high-level look at how the solution is designed and what it does:\n* Data preprocessing: To effectively detect fraudulent transactions, we address the initial imbalance in the raw transaction data. By employing the [Synthetic Minority Over-sampling Technique (SMOTE)](https://imbalanced-learn.org/stable/references/generated/imblearn.over_sampling.SMOTE.html), we duplicate instances of the minority class, enhancing the model's ability to identify patterns.\n* Model training: Using the balanced dataset, we train a [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) model. This creates a 'forest' of decision trees, each based on a random subset of the training data. It outputs the mode of the classes determined by the individual trees, making it well-suited for binary outcomes, such as identifying fraudulent transactions. The trained model is stored in a Google Cloud Storage bucket for later use.\n* Model deployment: Leveraging GitLab's DevSecOps platform, model deployment becomes straightforward. By committing the code to GitLab, the trained model is automatically deployed to Vertex AI. Subsequently, the API endpoint is also established.\n\n## Prerequisites\nBefore we dive into the details, let's make sure you have everything you need to get started with deploying your AI model using GitLab and Vertex AI.\n\nHere are the requirements:\n1. Google Cloud project\n1. 
Google Cloud service account with these permissions:\n   1. AI Platform Admin\n   1. Service Account User\n   1. Storage Admin\n   1. Storage Object Admin\n   1. Vertex AI Administrator\n1. GitLab project\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/p7GTsbSQWF4\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n## Demo walkthrough\nFollowing the [video walkthrough](https://youtu.be/p7GTsbSQWF4), here's a guide for setting up an AI pipeline with GitLab and Google Cloud's Vertex AI.\n\n### Step 1: GitLab and Google Cloud connection\nFirstly, we need GitLab to interact with Google Cloud. Insert your Google Cloud Service Account credentials into your GitLab project's environment variables (make sure they're Base64 encoded for security).\n\n### Step 2: Uploading data to Vertex AI\nMove to the Vertex AI section in Google Cloud. Here, [create and upload your dataset](https://cloud.google.com/vertex-ai/docs/tabular-data/forecasting/create-dataset). In our demo, we use a 'Tabular' dataset for 'Classification' as we're predicting credit card fraud.\n\n### Step 3: Creating the CI/CD pipeline\nBack to GitLab to structure our [CI/CD pipeline](https://gitlab.com/gitlab-com/alliances/google/sandbox-projects/demos/vertex-ai/-/ci/editor). 
It comprises three stages:\n\n**Test:** Quality and security checks.\n\n**Train:** Executes a Python script to train the model, outputting a .pkl  artifact.\n\n```\ntrain:\n stage: train\n script:\n   - apt-get update && apt-get install -y python3-pip python3-venv\n   - python3 -m venv venv\n   - source venv/bin/activate\n   - pip install --upgrade pip\n   - pip install pandas scikit-learn joblib imbalanced-learn google-cloud-storage\n   - python3 src/train.py\n artifacts:\n   paths:\n     - model.pkl\n```\n\n**Deploy:** Uses Google Cloud's Deep Learning platform container to deploy the trained model on Vertex AI.\n\n```\ndeploy:\n stage: deploy\n image:\n   name: us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-5\n   entrypoint: [\"\"]\n script:\n   - python src/deploy.py\n dependencies:\n   - train\n only:\n   - main\n```\n\n### Step 4: Model training\nThe [training code](https://gitlab.com/gitlab-com/alliances/google/sandbox-projects/demos/vertex-ai/-/blob/main/src/train.py), running on GitLab CI/CD, fetches data from Vertex AI, processes it, trains our RandomForestClassifier model, and saves the model to Google Cloud Storage.\n\n### Step 5: Model deployment\nThe [deployment script](https://gitlab.com/gitlab-com/alliances/google/sandbox-projects/demos/vertex-ai/-/blob/main/src/deploy.py) creates an endpoint on Vertex AI and deploys our trained model there, utilizing the service account credentials we set initially.\n\n### Step 6: Prediction testing\n\n![Screenshot of Vertex AI](https://about.gitlab.com/images/blogimages/vertex-ai-screenshot.png)\n\nFinally, within Vertex AI, navigate to your model and test its predictions using an input JSON request. If all goes well, you'll get a response from your model.\n\nThere you have it: an AI pipeline with GitLab and Google Cloud's Vertex AI. 
This combination of GitLab's DevSecOps capabilities with Vertex AI's scalable ML platform is designed with the aim of rapid and secure AI deployments.\n",[716,738,717,9],{"slug":3149,"featured":6,"template":693},"training-and-deploying-ai-models-with-gitlab-and-vertex-ai","content:en-us:blog:training-and-deploying-ai-models-with-gitlab-and-vertex-ai.yml","Training And Deploying Ai Models With Gitlab And Vertex Ai","en-us/blog/training-and-deploying-ai-models-with-gitlab-and-vertex-ai.yml","en-us/blog/training-and-deploying-ai-models-with-gitlab-and-vertex-ai",{"_path":3155,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3156,"content":3162,"config":3168,"_id":3170,"_type":14,"title":3171,"_source":16,"_file":3172,"_stem":3173,"_extension":19},"/en-us/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review",{"title":3157,"description":3158,"ogTitle":3157,"ogDescription":3158,"noIndex":6,"ogImage":3159,"ogUrl":3160,"ogSiteName":706,"ogType":707,"canonicalUrls":3160,"schema":3161},"GitLab transforms code review with machine learning tools","Learn how last year's acquisition has resulted in impactful features for the One DevOps Platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668002/Blog/Hero%20Images/pg-gear.jpg","https://about.gitlab.com/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"UnReview a year later: How GitLab is transforming DevOps code review with ML-powered functionality\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2022-06-02\",\n      }",{"title":3163,"description":3158,"authors":3164,"heroImage":3159,"date":3165,"body":3166,"category":1201,"tags":3167},"UnReview a year later: How GitLab is transforming DevOps code review with ML-powered 
functionality",[1455],"2022-06-02","\n\nA little over a year ago, [GitLab acquired UnReview](/press/releases/2021-06-02-gitlab-acquires-unreview-machine-learning-capabilities.html), a machine learning-based solution for automatically identifying [relevant code reviewers](/stages-devops-lifecycle/create/) and distributing review workloads and knowledge. Our goal is to integrate UnReview’s ML-powered code review features throughout GitLab, the One DevOps Platform. We checked in with Taylor McCaslin, principal product manager, ModelOps, at GitLab, to find out the impact UnReview has had so far and what comes next.\n\n**The idea of applying machine learning to code review was already underway at GitLab before the UnReview acquisition. What was it about ML/AI and automation that seemed a good fit for the code review process? How did the UnReview acquisition affect that strategy?**\n\nThe acquisition of UnReview gave GitLab a practical way to get started with a really focused value proposition that was obvious to the platform. ML/AI is a lot more than just having a useful algorithm. UnReview and its team gave GitLab talent with experience building MLOps pipelines and working with production DataOps workflows. As a source code management ([SCM](/solutions/source-code-management/)) and continuous integration ([CI](/topics/ci-cd/)) platform, MLOps and DataOps are key ambitions for our ModelOps stage. UnReview is the foundational anchor of our AI Assisted group, and we anticipate developing more ML-powered features with the base that we’ve built integrating UnReview into our One DevOps platform. If it’s something you manually set today within GitLab, we’ll consider suggestions and automations: suggested labels, assignees, issue relationships, etc. You can learn more about our plans on our [AI Assisted direction page](/direction/modelops/ai_assisted/).\n\n> You’re invited! 
Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\n**There were [three specific objectives with the UnReview project](/handbook/engineering/development/data-science/ai-assisted/projects/unreview/#overview) when you first started:**\n- **Eliminate the time wasted manually searching for an appropriate code reviewer to review code changes.**\n- **Make optimum recommendations that consider the reviewers’ experience and optimize the review load across the team, which additionally facilitates knowledge sharing.**\n- **Provide analytics on the state of code review in the project, explaining why a particular code reviewer is recommended.**\n\n**Have you had to change or add to these in any way?**\n\nWe now have Suggested Reviewers running for external beta customers as well as dogfooding it internally. We’ve learned a lot about what makes a good code reviewer. Some of the obvious things like context with the changed files and history of committing to that area of code are obvious. But there are less obvious things like what type of code someone has experience with (front-end or back-end).\n\nWe’re finding the concept of recency interesting: the idea that people who more recently interacted with files and functions may be better suited to review the code. Also, people leave companies, and that’s usually not something that can be inferred by the source graph, so we’re working on merging additional GitLab activity data with the recommendation engine.\n\nIn addition, we’re thinking a lot about bias in our recommendations. For example, a senior engineer likely has the most commits across a project, but we don’t always want to recommend a senior engineer. 
The more we work with the algorithm and recommendations, the more nuanced we find it.\n\nNot every organization does code review the same way, so we’re considering building different models for those that have no process versus organizations that have very rigid and hierarchical reviewer requirements. We also have to consider how recommendations interact with other features of the platform like code owners, maintainer roles, and commit access.\n\nWe’ve never been more excited about the potential of machine learning within GitLab. Some of the feedback we’ve had from beta customers are “this feels like magic” and that honestly encapsulates what we’re going for. Sometimes the right code reviewer is just a feeling that you can’t quite put your finger on. Through data and a little bit of magic, we may see Suggested Reviewers help speed up workflows, and cut down on back and forth and wasted time trying to find someone to do a great review of your code.\n\n**Introducing ML-powered features can come with challenges, especially being GitLab’s first data science feature. Can you speak to some of those challenges and how the team overcame them?**\n\nIt has been about a year since we closed the transaction. During that time period we’ve introduced a lot of new concepts to GitLab. Access to real-time data within the feature with DataOps extraction and cleaning of platform activity data. We have an end-to-end MLOps pipeline running 100% within GitLab CI that extracts, builds, trains, and deploys the UnReview model, and new observability metrics to know if the whole system is working. These are all foundational concepts that we’ve had to build from the ground up.\n\nAlso, we’ve introduced Python to the GitLab tech stack and have to develop new engineering standards and hiring interview practices to find the right talent for this team. 
We’re now turning the corner of this foundational work and I anticipate that relatively soon we’ll release Suggested Reviewers fully integrated with the platform and UI.\n\nMilestones have been part of the way we’ve sliced up the integration work. We have a variety of internal milestones we’ve been tracking against, including porting the model into GitLab SCM and CI, building the Dataops and MLOps pipelines, and internal and external customer betas. It’s helpful to have these milestones to know what’s most important at any given time and not to get overwhelmed with all the moving pieces. We’re paving a new path with ML-powered features at GitLab, and once we’re done we’ll have a repeatable process and template to replicate over and over with new data science-powered features.\n\n**What has been the most surprising thing you’ve encountered or learned since UnReview first debuted?**\n\nCode Reviewers are foundational to the software development lifecycle. We thought this would be a really straightforward feature, but it turns out people REALLY care about recommendations. People hate bad suggestions so when the recommendations are wrong, the feedback is fast and furious. But when it’s right, it feels like magic. That really surprised me how positively people respond to a great suggestion.\n\nA lot of GitLab users have asked me what our success metric is for Suggested Reviewers. It should just feel like magic. Maybe you don’t know why someone was chosen, but you just feel they were the right person to review the change. And hopefully that leads to a more thoughtful code review, reduces the back and forth of trying to find someone to review your code, and ultimately creates a better experience end-to-end. A lot of engineers dread code reviews; we want to change that. I hope Suggested Reviewers can take the pain out of the experience and make it something engineers look forward to. That’s the feeling we’re trying to create with our recommendations. 
Obvious but magic.\n\n**What’s next for UnReview specifically and DevOps code review more generally? Where do you see the next big advances happening?**\n\nWe’re just scratching the surface. There are so many opportunities for recommendations and automations across the platform. We have a lot of data at GitLab, from the source graph, contribution history, CI builds, test logs, security scans, and deployment data. We believe all of this can be integrated together. I’m particularly excited about what we’re calling [Intelligent Code Security](/direction/modelops/ai_assisted/#categories). The idea is that we will be able to look at your source code as you’re writing it, analyze it for security vulnerabilities, and not only suggest fixes to common security flaws, but also apply that change, run your CI, confirm the build succeeds, confirm the vulnerability was resolved, and possibly even deploy that change, all automatically.\n\nImagine the future where your code gets more secure automatically while you sleep. That sounds wild, but we have the data to power [a feature like this in the future](/direction/modelops/ai_assisted/#categories). Suggested Reviewers is just the beginning. We haven’t seen many DevOps platforms fully embrace the data, code, and activity data that they have in a material way. 
I think we’ll see a lot more in this space moving forward as development platforms identify the massive opportunities to drive efficiencies and remove the frustrating parts of software development from the process.\n",[917,834,1081,233,9],{"slug":3169,"featured":6,"template":693},"unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review","content:en-us:blog:unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review.yml","Unreview A Year Later How Gitlab Is Being Transformed By Ml Powered Code Review","en-us/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review.yml","en-us/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review",{"_path":3175,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3176,"content":3182,"config":3188,"_id":3190,"_type":14,"title":3191,"_source":16,"_file":3192,"_stem":3193,"_extension":19},"/en-us/blog/use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace",{"title":3177,"description":3178,"ogTitle":3177,"ogDescription":3178,"noIndex":6,"ogImage":3179,"ogUrl":3180,"ogSiteName":706,"ogType":707,"canonicalUrls":3180,"schema":3181},"Use GitLab AI features out-of-the-box in a GitLab Workspace","GitLab Workspaces now ships with the GitLab workflow extension preinstalled, providing access to powerful AI features like GitLab Duo Chat and Code Suggestions for increased productivity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098843/Blog/Hero%20Images/Blog/Hero%20Images/securitylifecycle-light_securitylifecycle-light.png_1750098843047.png","https://about.gitlab.com/blog/use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use GitLab AI features out-of-the-box in a GitLab Workspace\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Safwan Ahmed\"}],\n        
\"datePublished\": \"2024-07-24\",\n      }",{"title":3177,"description":3178,"authors":3183,"heroImage":3179,"date":3185,"body":3186,"category":10,"tags":3187},[3184],"Safwan Ahmed","2024-07-24","AI is transforming the way we get work done, from helping automate mundane tasks to optimizing aspects of our day-to-day workflow. Of particular relevance has been [generative AI’s ability](https://about.gitlab.com/the-source/ai/how-to-put-generative-ai-to-work-in-your-devsecops-environment/) to support developers in getting the job done, from code snippet suggestions to concise summaries of technical questions. These AI tools have been embedded in the development lifecycle through integrations with existing software like code editors and CI/CD platforms. Thanks to these integrations, particularly in the case of code editors, developers can have an AI assistant that complements their skills within their development environment.\n\nWhile these AI tools can help boost productivity, setting them up in an existing development environment may not be preferable. For example, you may not want to install a new dependency on your local workstation that could affect your setup, you may have security or privacy concerns about running AI tools on your computer, or you may find it hard to give the tooling context on your existing workflow. GitLab resolves these issues by providing a suite of tools that allow you to leverage the power of AI in [a remote development workspace](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/) right out of the box. In this blog, you'll learn about the GitLab features that make this possible and how to set up your [workspace environment](https://docs.gitlab.com/ee/user/workspace/) to get started.\n\n## GitLab workflow extension for VS Code\n\nGitLab workflow extension for VS Code integrates GitLab into the VS Code editor. It brings into scope key elements of your GitLab workflow such as issues, merge requests, and pipeline status. 
For more information, visit [the GitLab workflow extension documentation](https://docs.gitlab.com/ee/editor_extensions/visual_studio_code/).\n\n## GitLab Workspaces\n\nGitLab Workspaces provide an isolated development environment to make changes to your GitLab projects. Workspaces offer a platform to work on your projects without the complexity of setting up local dependencies. Workspaces also provide reproducible development setups, as a workspace environment configuration created by one developer can be shared with others. GitLab Workspaces are configured to use the VS Code editor and ship with the workflow extension preinstalled. To learn more, visit the [GitLab Workspaces documentation](https://docs.gitlab.com/ee/user/workspace/).\n\n## GitLab Duo Chat and Code Suggestions\n\n[GitLab Duo Chat](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available) and [GitLab Duo Code Suggestions](https://about.gitlab.com/blog/gitlab-duo-code-suggestions-is-generally-available/) are part of the GitLab Duo suite of AI features enhancing developer productivity. Chat and Code Suggestions are integrated into the workflow extension and are GitLab context-aware. This allows you to ask GitLab Duo questions about items like issues and merge requests and to automatically have access to code suggestions and code completion. This integration requires [a GitLab Duo license](https://about.gitlab.com/gitlab-duo/). See the [GitLab Duo Chat documentation](https://docs.gitlab.com/ee/user/gitlab_duo_chat/) and [GitLab Duo Code Suggestions documentation](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/) for more information.\n\n## How to set up the Workflow extension, Workspaces, and GitLab Duo to work together\n\nWhile these features are impressive on their own, when combined they deliver on the promise of an easy-to-spin-up, isolated, AI-driven development environment. 
Here are the steps to get this powerhouse up and running.\n\n## Create a workspace\n\nFollow this [comprehensive but easy-to-follow tutorial](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/) to create a remote development workspace.\n\n## Validate GitLab Workflow is active\n\nAfter your workspace is up and running, you should see a GitLab icon on the side of your editor like the following:\n\n![Arrow pointing to GitLab tanuki icon](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098853/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098853108.png)\n\nYou can then use the workflow extension to bring up merge requests assigned to you in the current project in GitLab. To do this, access the command palette by hitting `command + shift + P` and entering `GitLab: Show Merge Requests Assigned to Me`. This will redirect you to GitLab and show your assigned MRs.\n\n![Arrow pointing to 'GitLab: Show Merge Requests Assigned to Me'](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098853/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098853109.png)\n\nFor more tips and tricks, read [Visual Studio code editor: Eight tips for using GitLab VS Code](https://about.gitlab.com/blog/vscode-workflows-for-working-with-gitlab/).\n\n## Use GitLab Duo Chat\n\nYou should also see a second, smaller GitLab icon on your sidebar. This gives you access to GitLab Duo Chat. Feel free to ask it a question.\n\n![Arrow pointing to GitLab tanuki icon with sparkles around it](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098853/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098853111.png)\n\n## Use GitLab Duo Code Suggestions\n\nOpen up any source file in your directory. You can begin typing code and have predictive suggestions, powered by GitLab Duo Code Suggestions, pop up –  you can insert them by hitting the tab key. 
The example below shows my attempt to write a string processing function. Code Suggestions has inferred I would want to split the passed string into spaces, which is indeed my intention.\n\n![Code Suggestions suggesting that the passed string into spaces](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098853/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098853112.png)\n\nSuppose I have completed my string processing function above and would like to generate unit tests for it but want to avoid the chore of writing boilerplate code. You can provide a comment in your editor and have Code Suggestions generate code for you like the following:\n\n![Shows boilerplate code for generating unit tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098853/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098853115.png)\n\nCode Suggestions implements a whole unit test for my function, covering happy and sad paths.\n\nFor more exciting uses of the GitLab Duo suite, check out these articles:\n* [10 best practices for using AI-powered GitLab Duo Chat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/)\n* [Top tips for efficient AI-powered Code Suggestions with GitLab Duo](https://about.gitlab.com/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo/)\n* [\"Developing GitLab Duo\" blog series](https://about.gitlab.com/blog/developing-gitlab-duo-series/)\n\n# Next steps\n\nGitLab Workspaces is coming up with more exciting integrations and features that will enhance your remote development experience, be sure to check out the [category epic](https://gitlab.com/groups/gitlab-org/-/epics/7419) to know what’s coming next!\n\n> Sign up for [a free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/) 
today!\n",[9,759,233,717],{"slug":3189,"featured":91,"template":693},"use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace","content:en-us:blog:use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace.yml","Use Gitlab Ai Features Out Of The Box In A Gitlab Workspace","en-us/blog/use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace.yml","en-us/blog/use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace",{"_path":3195,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3196,"content":3202,"config":3207,"_id":3209,"_type":14,"title":3210,"_source":16,"_file":3211,"_stem":3212,"_extension":19},"/en-us/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project",{"title":3197,"description":3198,"ogTitle":3197,"ogDescription":3198,"noIndex":6,"ogImage":3199,"ogUrl":3200,"ogSiteName":706,"ogType":707,"canonicalUrls":3200,"schema":3201},"Use GitLab Duo to build and deploy a simple Quarkus-native project","This tutorial shows how a Java application is compiled to machine code and deployed to a Kubernetes cluster using a CI/CD pipeline. 
See how AI makes the process faster and more efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666069/Blog/Hero%20Images/AdobeStock_639935439.jpg","https://about.gitlab.com/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use GitLab Duo to build and deploy a simple Quarkus-native project\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2024-10-17\",\n      }",{"title":3197,"description":3198,"authors":3203,"heroImage":3199,"date":3204,"body":3205,"category":10,"tags":3206},[733],"2024-10-17","In [“How to automate software delivery using Quarkus and\nGitLab,”](https://about.gitlab.com/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab/)\nyou learned how to develop and deploy a simple Quarkus-JVM application to a\nKubernetes cluster using [GitLab Auto\nDevOps](https://docs.gitlab.com/ee/topics/autodevops/). Now, you'll learn\nhow to use Quarkus-native to compile a Java application to machine code and\ndeploy it to a Kubernetes cluster using a CI/CD pipeline. Follow our journey\nfrom development to deployment leveraging [GitLab\nDuo](https://about.gitlab.com/gitlab-duo/) as our AI companion, including\nthe specific prompts we used.\n\n\n## What is Quarkus?\n\n\n[Quarkus](https://quarkus.io/), also known as the Supersonic Subatomic Java,\nis an open source, Kubernetes-native Java stack tailored to OpenJDK HotSpot\nand GraalVM. The Quarkus project recently moved to the [Commonhaus\nFoundation](https://www.commonhaus.org/), a nonprofit organization dedicated\nto the sustainability of open source libraries and frameworks that provides\na balanced approach to governance and support.\n\n\n## Prerequisites\n\n\nThis tutorial assumes:\n\n\n- You have a running Kubernetes cluster, e.g. 
GKE.\n\n- You have access to the Kubernetes cluster from your local laptop via the\n`kubectl` command.\n\n- The cluster is connected to your GitLab project.\n\n- You have [Maven (Version 3.9.6 or later)](https://maven.apache.org/)\ninstalled on your local laptop.\n\n- You have Visual Studio Code installed on your local laptop.\n\n\nIf you’d like to set up a Kubernetes cluster connected to your GitLab\nproject, you can follow the instructions in this\n[tutorial](https://about.gitlab.com/blog/eliminate-risk-with-feature-flags-tutorial/),\nup to but not including the “Creating an instance of MySQL database in your\ncluster via Flux” section (you do not need a database for this tutorial).\n\n\nYou will also need to install an nginx ingress in your Kubernetes cluster.\nHere are two ways to do this:\n\n1. You can follow the instructions in [“Creating and importing\nprojects”](https://about.gitlab.com/blog/eliminate-risk-with-feature-flags-tutorial/#creating-and-importing-projects),\nup to the creation of the variable `KUBE_INGRESS_BASE_DOMAIN`.\n\n2. Or, just create an ingress in your Kubernetes cluster by following the\ninstructions in our [Auto DevOps with GKE\ndocumentation](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html#install-ingress).\n\n\n**NOTE:** For this article, we used the first method above to install an\ningress and cert-manager in the Kubernetes cluster.\n\n\n## Creating necessary project files using GitLab Duo Chat\n\n\nWe started our endeavor from VS Code and an empty project called\n`quarkus-native`, which we had previously created in GitLab and had already\ncloned to our local laptop.\n\n\n1. 
We opened GitLab Duo Chat, within VS Code, and entered the following\nprompt:\n\n\n**_Create a “Hello World” Quarkus application that can be natively\ncompiled_**\n\n\n![hello-world-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/hello-world-prompt.png)\n\n\nChat replied with the prerequisites and process of what to do to create a\nsimple “Hello World” Quarkus application. At this point, we were after the\ngenerated body of the class `HelloResource.java`:\n\n\n```\n\npackage org.acme;\n\n\nimport javax.ws.rs.GET;\n\nimport javax.ws.rs.Path;\n\nimport javax.ws.rs.Produces;\n\nimport javax.ws.rs.core.MediaType;\n\n\n@Path(\"/hello\")\n\npublic class HelloResource {\n\n    @GET\n    @Produces(MediaType.TEXT_PLAIN)\n    public String hello() {\n        return \"Hello World\";\n    }\n}\n\n```\n\n\n2. We knew that for Quarkus we needed a special Dockerfile, named\n`Dockerfile.native`, as part of the creation of the image for the\napplication executable. We then entered the following prompt in Chat:\n\n\n**_Create the Dockerfile.native file for the sample application above_**\n\n\n![create-dockerfile-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675941/Blog/Content%20Images/create-dockerfile-prompt.png)\n\n\nChat replied with a very thorough answer about this file, its contents, and\nthe containerization process. At this point, we were just after the\ngenerated body of the file `Dockerfile.native`:\n\n\n```\n\n####\n\n# This Dockerfile is used in order to build a container that runs the\nQuarkus application in native (no JVM) mode\n\n####\n\nFROM registry.access.redhat.com/ubi8/ubi-minimal:8.6\n\nWORKDIR /work/\n\nRUN chown 1001 /work \\\n    && chmod \"g+rwX\" /work \\\n    && chown 1001:root /work\nCOPY --chown=1001:root target/*-runner /work/application\n\n\nEXPOSE 8080\n\nUSER 1001\n\n\nCMD [\"./application\", \"-Dquarkus.http.host=0.0.0.0\"]\n\n```\n\n\n3. 
Next, we asked Chat to generate the `pom.xml` for this application by\nentering the following prompt:\n\n\n**_Create the pom.xml file for the sample application above_**\n\n\n![create-pom-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-pom-prompt.png)\n\n\nChat provided the content of the `pom.xml` file, an explanation of its\ncontents, and how to use it. At this point, we were after the generated body\nof the file `pom.xml`:\n\n\n```\n\n\u003C?xml version=\"1.0\"?>\n\n\u003Cproject xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0\nhttps://maven.apache.org/xsd/maven-4.0.0.xsd\"\nxmlns=\"http://maven.apache.org/POM/4.0.0\"\n    xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n  \u003CmodelVersion>4.0.0\u003C/modelVersion>\n  \u003CgroupId>org.acme\u003C/groupId>\n  \u003CartifactId>hello-world-quarkus\u003C/artifactId>\n  \u003Cversion>1.0.0-SNAPSHOT\u003C/version>\n  \u003Cproperties>\n    \u003Ccompiler-plugin.version>3.10.1\u003C/compiler-plugin.version>\n    \u003Cmaven.compiler.release>11\u003C/maven.compiler.release>\n    \u003Cproject.build.sourceEncoding>UTF-8\u003C/project.build.sourceEncoding>\n    \u003Cproject.reporting.outputEncoding>UTF-8\u003C/project.reporting.outputEncoding>\n    \u003Cquarkus.platform.artifact-id>quarkus-bom\u003C/quarkus.platform.artifact-id>\n    \u003Cquarkus.platform.group-id>io.quarkus.platform\u003C/quarkus.platform.group-id>\n    \u003Cquarkus.platform.version>2.16.5.Final\u003C/quarkus.platform.version>\n    \u003CskipITs>true\u003C/skipITs>\n    \u003Csurefire-plugin.version>3.0.0-M7\u003C/surefire-plugin.version>\n  \u003C/properties>\n  \u003CdependencyManagement>\n    \u003Cdependencies>\n      \u003Cdependency>\n        \u003CgroupId>${quarkus.platform.group-id}\u003C/groupId>\n        \u003CartifactId>${quarkus.platform.artifact-id}\u003C/artifactId>\n        \u003Cversion>${quarkus.platform.version}\u003C/version>\n        
\u003Ctype>pom\u003C/type>\n        \u003Cscope>import\u003C/scope>\n      \u003C/dependency>\n    \u003C/dependencies>\n  \u003C/dependencyManagement>\n  \u003Cdependencies>\n    \u003Cdependency>\n      \u003CgroupId>io.quarkus\u003C/groupId>\n      \u003CartifactId>quarkus-arc\u003C/artifactId>\n    \u003C/dependency>\n    \u003Cdependency>\n      \u003CgroupId>io.quarkus\u003C/groupId>\n      \u003CartifactId>quarkus-resteasy\u003C/artifactId>\n    \u003C/dependency>\n    \u003Cdependency>\n      \u003CgroupId>io.quarkus\u003C/groupId>\n      \u003CartifactId>quarkus-junit5\u003C/artifactId>\n      \u003Cscope>test\u003C/scope>\n    \u003C/dependency>\n    \u003Cdependency>\n      \u003CgroupId>io.rest-assured\u003C/groupId>\n      \u003CartifactId>rest-assured\u003C/artifactId>\n      \u003Cscope>test\u003C/scope>\n    \u003C/dependency>\n  \u003C/dependencies>\n  \u003Cbuild>\n    \u003Cplugins>\n      \u003Cplugin>\n        \u003CgroupId>${quarkus.platform.group-id}\u003C/groupId>\n        \u003CartifactId>quarkus-maven-plugin\u003C/artifactId>\n        \u003Cversion>${quarkus.platform.version}\u003C/version>\n        \u003Cextensions>true\u003C/extensions>\n        \u003Cexecutions>\n          \u003Cexecution>\n            \u003Cgoals>\n              \u003Cgoal>build\u003C/goal>\n              \u003Cgoal>generate-code\u003C/goal>\n              \u003Cgoal>generate-code-tests\u003C/goal>\n            \u003C/goals>\n          \u003C/execution>\n        \u003C/executions>\n      \u003C/plugin>\n      \u003Cplugin>\n        \u003CartifactId>maven-compiler-plugin\u003C/artifactId>\n        \u003Cversion>${compiler-plugin.version}\u003C/version>\n        \u003Cconfiguration>\n          \u003CcompilerArgs>\n            \u003Carg>-parameters\u003C/arg>\n          \u003C/compilerArgs>\n        \u003C/configuration>\n      \u003C/plugin>\n      \u003Cplugin>\n        \u003CartifactId>maven-surefire-plugin\u003C/artifactId>\n        
\u003Cversion>${surefire-plugin.version}\u003C/version>\n        \u003Cconfiguration>\n          \u003CsystemPropertyVariables>\n            \u003Cjava.util.logging.manager>org.jboss.logmanager.LogManager\u003C/java.util.logging.manager>\n            \u003Cmaven.home>${maven.home}\u003C/maven.home>\n          \u003C/systemPropertyVariables>\n        \u003C/configuration>\n      \u003C/plugin>\n      \u003Cplugin>\n        \u003CartifactId>maven-failsafe-plugin\u003C/artifactId>\n        \u003Cversion>${surefire-plugin.version}\u003C/version>\n        \u003Cexecutions>\n          \u003Cexecution>\n            \u003Cgoals>\n              \u003Cgoal>integration-test\u003C/goal>\n              \u003Cgoal>verify\u003C/goal>\n            \u003C/goals>\n            \u003Cconfiguration>\n              \u003CsystemPropertyVariables>\n                \u003Cnative.image.path>${project.build.directory}/${project.build.finalName}-runner\u003C/native.image.path>\n                \u003Cjava.util.logging.manager>org.jboss.logmanager.LogManager\u003C/java.util.logging.manager>\n                \u003Cmaven.home>${maven.home}\u003C/maven.home>\n              \u003C/systemPropertyVariables>\n            \u003C/configuration>\n          \u003C/execution>\n        \u003C/executions>\n      \u003C/plugin>\n    \u003C/plugins>\n  \u003C/build>\n  \u003Cprofiles>\n    \u003Cprofile>\n      \u003Cid>native\u003C/id>\n      \u003Cactivation>\n        \u003Cproperty>\n          \u003Cname>native\u003C/name>\n        \u003C/property>\n      \u003C/activation>\n      \u003Cproperties>\n        \u003CskipITs>false\u003C/skipITs>\n        \u003Cquarkus.package.type>native\u003C/quarkus.package.type>\n      \u003C/properties>\n    \u003C/profile>\n  \u003C/profiles>\n\u003C/project>\n\n```\n\n\n4. 
We then asked Chat to generate the Quarkus test file for this application\nby entering the following prompt:\n\n\n**_Create the content for the Quarkus test for the application above_**\n\n\n![create-quarkus-test-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-quarkus-test-prompt.png)\n\n\nChat returned the body for the Quarkus test, how to use it, and the body of\nthe Java class for the test needed for its native compilation. At this\npoint, we were after the generated bodies of the file\n`HelloResourceTest.java` and `HelloResourceIT.java`.\n\n\nHere is the generated body for `HelloResourceTest.java`:\n\n\n```\n\npackage org.acme;\n\n\nimport io.quarkus.test.junit.QuarkusTest;\n\nimport org.junit.jupiter.api.Test;\n\n\nimport static io.restassured.RestAssured.given;\n\nimport static org.hamcrest.CoreMatchers.is;\n\n\n@QuarkusTest\n\npublic class HelloResourceTest {\n\n    @Test\n    public void testHelloEndpoint() {\n        given()\n          .when().get(\"/hello\")\n          .then()\n             .statusCode(200)\n             .body(is(\"Hello World\"));\n    }\n\n}\n\n```\n\n\nHere is the generated body for `HelloResourceIT.java`:\n\n\n```\n\npackage org.acme;\n\n\nimport io.quarkus.test.junit.QuarkusIntegrationTest;\n\n\n@QuarkusIntegrationTest\n\npublic class HelloResourceIT extends HelloResourceTest {\n    // Execute the same tests but in native mode.\n}\n\n```\n\n\n5. We needed to know how to organize these files in the GitLab project, so\nwe asked about the directory structure for all these files by entering the\nfollowing prompt in Chat:\n\n\n**_Give me the entire directory structure for this project including the\nlocation of each file, e.g. 
pom.xml, Dockerfile.native,\napplication.properties, HelloResource.java, HelloResourceTest.java, and the\nlocation of the target directory_**\n\n\n![create-dir-struct-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-dir-struct-prompt.png)\n\n\nChat replied with a detailed diagram about the entire directory structure\nfor the project and where all these files should be located as well as a\ndescription of the purpose of each of them. It even mentioned that the\ndirectory `target/` and its contents should not be version controlled since\nit was generated by the build process. Another interesting aspect of the\nreply was the existence of a file called `resources/application.properties`\nin the directory structure.\n\n\n![dir-struct-chat-response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/dir-struct-chat-response.png)\n\n\nWith all this information in our hands, we were ready to start creating\nthese files in our GitLab project.\n\n\n## Populating our project with the generated content for each file\n\n\nWe created each of the following files in their corresponding location and\ntheir generated content as provided by Chat:\n\n\n- `src/main/java/org/acme/HelloResource.java`\n\n- `resources/application.properties`\n\n- `src/test/java/org/acme/HelloResourceTest.java`\n\n- `src/test/java/org/acme/HelloResourceIT.java`\n\n- `pom.xml`\n\n- `Dockerfile.native`\n\n\n**NOTE:** We considered using GitLab Auto Deploy for this endeavor but later\nrealized that it would not be a supported option. We are mentioning this\nbecause in the video at the end of this tutorial, you will see that we asked\nChat: `How to set the service internalPort to 8080 for auto deploy`. Then we\ncreated a file named `.gitlab/auto-deploy-values.yaml` with the generated\ncontent from Chat. 
The creation of this file is not necessary for this\ntutorial.\n\n\nBefore we started tackling the pipeline to build, containerize, and deploy\nthe application to our Kubernetes cluster, we decided to generate the\nexecutable locally on our Mac and test the application locally.\n\n\n## Testing the application locally\n\n\nHere is the process we went through to test the application on our local\nmachine.\n\n\n1. To build the application on the local Mac laptop, from a Terminal window,\nwe entered the following command:\n\n\n```\n\nmvn clean package -Pnative\n\n```\n\n\n![first-build](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/first-build.png)\n\n\nThe native compilation failed with the error message:\n\n\n`Cannot find the ‘native-image’ in the GRAALVM_HOME, JAVA_HOME and System\nPATH. Install it using ‘gu install native-image’`\n\n\n2. So, we used our trusty GitLab Duo Chat again and asked it the following:\n\n\n**_The command “mvn clean package -Pnative” is failing with error\n“java.lang.RuntimeException: Cannot find the ‘native-image’ in the\nGRAALVM_HOME, JAVA_HOME and System PATH. Install it using gu install\nnative-image”. I’m using a MacOS Sonoma. How do I fix this error on my\nMac?_**\n\n\n![how-to-fix-build-failure-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-fix-build-failure-prompt.png)\n\n\nChat replied with a detailed set of steps on how to install the necessary\nsoftware and set the appropriate environment variables.\n\n\n3. 
We copied and pasted the following commands from the Chat window to a\nTerminal window:\n\n\n```\n\nbrew install --cask graalvm/tap/graalvm-ce-java17\n\nexport JAVA_HOME=/Library/Java/JavaVirtualMachines/graalvm-ce-java17-22.3.1\n\nexport GRAALVM_HOME=${JAVA_HOME}\n\nexport PATH=${GRAALVM_HOME}/bin:$PATH\n\nxattr -r -d com.apple.quarantine ${GRAALVM_HOME}/../..\n\ngu install native-image\n\n```\n\n\nThe commands above installed the community edition of GraalVM Version 22.3.1\nthat supported Java 17. We noticed, during the brew install, that the\nversion of the GraalVM being installed was `java17-22.3.1`, so we had to\nupdate the pasted value for `JAVA_HOME` from `graalvm-ce-java17-22.3.0` to\n`graalvm-ce-java17-22.3.1`.\n\n\nWe also had to run the `xattr` command to get the GraalVM, which we had\ndownloaded and installed on our Mac, out of quarantine so that it could run\nlocally. Lastly, we installed the GraalVM native-image.\n\n\n4. At this point, we again, from a Terminal window, entered the following\ncommand to build the application on the local Mac laptop:\n\n\n```\n\nmvn clean package -Pnative\n\n```\n\n\nThis time the compilation was successful and an executable was generated in\nthe `target` directory.\n\n\n![successful-local-compilation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/successful-local-compilation.png)\n\n\n5. We ran the executable by entering the following commands from a Terminal\nwindow:\n\n\n```\n\ncd target\n\n./quarkus-native-1.0.0-SNAPSHOT-runner \"-Dquarkus.http.host=0.0.0.0\"\n\n```\n\n\n![executable-local-run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/executable-local-run.png)\n\n\n6. 
With the application running, we opened a browser window, and in the URL\nfield, we entered:\n\n\n```\n\nhttp://localhost:8080/hello\n\n```\n\n\n![app-running-locally](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/app-running-locally.png)\n\n\nThe application returned the string `Hello World`, which was displayed in\nthe browser window.\n\n\nAt this point, we committed and pushed all the changes to our GitLab project\nand started working on creating a CI/CD pipeline that would build and deploy\nthe application to a Kubernetes cluster running on the cloud.\n\n\nBut before continuing, we remembered to add, commit, and push a `.gitignore`\nfile to our project that included the path `target/`, since this was the\ndirectory where the executable would be created and we didn’t need to keep\nit - or its contents - under version control.\n\n\n## Creating the pipeline with GitLab Duo Chat\n\n\nNow that we had already successfully tested the application locally on our\nMac, we needed to create the CI/CD pipeline that would compile the\napplication, containerize it, and deploy it to our Kubernetes cluster. We\nwanted to keep the pipeline simple, brief, and have a single environment in\nwhich to deploy it. To this end, the pipeline would not tackle multiple\nenvironments or feature branches, for example.\n\n\n1. To avoid manually creating a pipeline from scratch, we decided to once\nagain leverage Chat. We entered the following prompt\n\n\n**_Create a .gitlab-ci.yml file with 3 stages: build, containerize, and\ndeploy. Each of these stages should have a single job with the same name.\nThe build job should compile the application natively using the -Pnative\nmaven option and the builder image for mandrel jdk-22 for java17 and store\nthe application executable and its Dockerfile as artifacts. The containerize\njob should use docker to build and push the image to the built-in container\nregistry. 
The deploy job should rollout the containerized application, named\nquarkus-app, to the production environment in the Kubernetes cluster by\ncreating a deployment resource, a service resource and an ingress rule with\nhost ${KUBE_INGRESS_BASE_DOMAIN} and /hello prefix. The deploy job should\nalso include the url for the production environment. When setting the\nKubernetes context, use ${KUBE_CONTEXT}, and for application namespace, use\n${KUBE_NAMESPACE}_**\n\n\n![create-pipeline-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-pipeline-prompt.png)\n\n\nThe prompt above provided some points that we thought would be useful and\nnecessary for a good outcome. The requirements listed by the prompt were:\n\n\n- It specified that the pipeline needed to have three stages: build,\ncontainerize, and deploy.\n\n- Each stage would have a single job.\n\n- The build job should natively compile (not build) the application using\nthe -Pnative option.\n\n- The build job should use the open source software `mandrel` and use the\nversion that would support jdk-22 for Java 17. Remember that these were the\nversions that we tested on our local machine.\n\n- The build job would also need to pass the resulting executable and the\nproject Dockerfile to the next job, i.e. 
containerize, in the pipeline.\n\n- The containerize job should build a docker image with the executable using\nthe Dockerfile, both of which were passed as artifacts.\n\n- The containerize job should push the image to the GitLab container\nregistry.\n\n- The deploy job should use the image from the GitLab container registry,\nname it `quarkus-app`, and deploy it to the Kubernetes cluster using\ndeployment, service and ingress rule resources.\n\n- The deploy job should use the environment variables\n`${KUBE_INGRESS_BASE_DOMAIN}`, `${KUBE_CONTEXT}`, and `${KUBE_NAMESPACE}`,\nwhich had been defined as group/project variables, accordingly.\n\n\nChat responded with the following `.gitlab-ci.yml` example:\n\n\n```\n\n# Define the stages for the pipeline\n\nstages:\n  - build\n  - containerize\n  - deploy\n\n# Variables section to use GitLab CI/CD variables\n\nvariables:\n  MAVEN_OPTS: \"-Dmaven.repo.local=.m2/repository\"\n  MAVEN_CLI_OPTS: \"--batch-mode --errors --fail-at-end --show-version -DinstallAtEnd=true -DdeployAtEnd=true\"\n  CONTAINER_IMAGE: ${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA}\n\n# Cache configuration for Maven dependencies\n\ncache:\n  paths:\n    - .m2/repository\n\n# Build stage: Compile the application natively\n\nbuild:\n  stage: build\n  image:\n    name: quay.io/quarkus/ubi-quarkus-mandrel-builder-image:22.3-java17\n    entrypoint:\n      - \"\"\n  script:\n    - ./mvnw ${MAVEN_CLI_OPTS} clean package -Pnative\n  artifacts:\n    paths:\n      - target/*-runner\n      - src/main/docker/Dockerfile.native-micro\n  # This job runs only on the main branch\n  only:\n      - main\n\n# Containerize stage: Build and push Docker image\n\ncontainerize:\n  stage: containerize\n  image: docker:20.10.16\n  services:\n    - docker:20.10.16-dind\n  variables:\n    DOCKER_TLS_CERTDIR: \"/certs\"\n  before_script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n  script:\n    - docker build -f src/main/docker/Dockerfile.native-micro -t 
${CONTAINER_IMAGE} .\n    - docker push ${CONTAINER_IMAGE}\n  # This job runs only on the main branch\n  only:\n      - main\n\n# Deploy stage: Deploy to Kubernetes\n\ndeploy:\n  stage: deploy\n  image: \n    name: bitnami/kubectl:latest\n    entrypoint: ['']\n  script:\n    - kubectl config use-context ${KUBE_CONTEXT}\n    - kubectl config set-context --current --namespace=${KUBE_NAMESPACE}\n    - |\n      cat \u003C\u003CEOF | kubectl apply -f -\n      apiVersion: apps/v1\n      kind: Deployment\n      metadata:\n        name: quarkus-app\n      spec:\n        replicas: 1\n        selector:\n          matchLabels:\n            app: quarkus-app\n        template:\n          metadata:\n            labels:\n              app: quarkus-app\n          spec:\n            containers:\n            - name: quarkus-app\n              image: ${CONTAINER_IMAGE}\n              ports:\n              - containerPort: 8080\n      ---\n      apiVersion: v1\n      kind: Service\n      metadata:\n        name: quarkus-app\n      spec:\n        selector:\n          app: quarkus-app\n        ports:\n        - port: 80\n          targetPort: 8080\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: quarkus-app\n      spec:\n        rules:\n        - host: ${KUBE_INGRESS_BASE_DOMAIN}\n          http:\n            paths:\n            - path: /hello\n              pathType: Prefix\n              backend:\n                service:\n                  name: quarkus-app\n                  port: \n                    number: 80\n      EOF\n    - echo \"Application deployed to https://${KUBE_INGRESS_BASE_DOMAIN}/hello\"\n  # This job runs only on the main branch\n  only:\n      - main\n```\n\n\n2. There were some things we needed to adjust in the sample `.gitlab-ci.yml`\nfile above before we could commit it to our `main` branch. 
These are the\nupdates we made to the file:\n\n\n- We deleted all occurrences of `only: -main` because we wanted to keep our\npipeline definition file simple and with no branch-related rules.\n\n- We fixed the name of the file `Dockerfile.native-micro` to\n`Dockerfile.native`.\n\n\n3. At this point, we wanted to ensure that the deployment would be to the\n`production` environment so we asked Chat the following prompt:\n\n\n**_What is the syntax to specify an environment with its url in a\npipeline?_**\n\n\n![how-to-add-env-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-add-env-prompt.png)\n\n\nThe response from Chat included an example of how to do this so we used this\ninformation to add the following environment block to our pipeline:\n\n\n```\n  environment:\n       name: production\n       url: http://${KUBE_INGRESS_BASE_DOMAIN}/hello\n```\n\n\n4. The example provided by Chat included a URL that started with `https` and\nwe modified that to `http` since we didn’t really need a secure connection\nfor this simple application.\n\n\n5. Lastly, we noticed that in the `build` job, there was a script `mvnw`\nthat we didn’t have in our project. So, we asked Chat the following:\n\n\n**_How can I get the mvnw script for Quarkus?_**\n\n\n![how-to-add-mvnw-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-add-mvnw-prompt.png)\n\n\nChat responded with the command to execute to bootstrap and create this\nscript. We executed this command from a Terminal window:\n\n\n```\n\nmvn wrapper:wrapper\n\n```\n\n\nWe were now ready to commit all of our changes to the `main` branch and have\nthe pipeline executed. However, on our first attempt, our first pipeline\nfailed at the build job.\n\n\n## Troubleshooting using GitLab Duo Root Cause Analysis\n\n\nOur first attempt at running our brand-new pipeline failed. 
So, we took\nadvantage of [GitLab Duo Root Cause\nAnalysis](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/),\nwhich looks at the job logs and provides a thorough natural language\nexplanation (with examples) of the root cause of the problem and, most\nimportantly, how to fix it.\n\n\n![build-job-troubleshooting](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/build-job-troubleshooting.png)\n\n\nRoot Cause Analysis recommended we look at the compatibility of the command\nthat was trying to be executed with the image of mandrel used in the build\njob. We were not using any command with the image so we concluded that it\nmust have been the predefined `entrypoint` for the image itself. We needed\nto override this so we asked Chat the following:\n\n\n**_How do I override the entrypoint of an image using gitlab keywords?_**\n\n\n![how-to-override-entrypoint-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-override-entrypoint-prompt.png)\n\n\nChat replied with some use case examples of overriding an image entry point.\nWe used that information to update the build job image definition:\n\n\n```\n\nbuild:\n    stage: build\n    image: quay.io/quarkus/ubi-quarkus-mandrel-builder-image:22.3-java17\n    entrypoint:\n        - \"\"\n```\n\n\nWe committed our changes to the `main` branch, which launched a new instance\nof the pipeline. This time the build job executed successfully but the\npipeline failed at the `containerize` job.\n\n\n## Running a successful pipeline\n\n\nBefore drilling down into the log of the failed `containerize` job, we\ndecided to drill into the log of the successfully completed build job first.\nEverything looked good in the log of the build job with the exception of\nthis warning message at the very end of it:\n\n\n```\n\nWARNING: src/main/docker/Dockerfile.native: no matching files. 
Ensure that\nthe artifact path is relative to the working directory …\n\n``` \n\n\nWe took notice of this warning and then headed to the log of the failed\n`containerize` job. In it, we saw that the `docker build` command had failed\ndue to a non-existent Dockerfile. We ran Root Cause Analysis on the job and\namong its suggested fixes was for us to verify that the project structure\nmatched the path of the specified `Dockerfile.native` file.\n\n\n![containerize-job-troubleshooting](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/containerize-job-troubleshooting.png)\n\n\nThis information confirmed our suspicion of the misplaced\n`Dockerfile.native` file. Instead of being at the directory\n`src/main/docker` as specified in the pipeline, it was located at the root\ndirectory of the project.\n\n\nSo, we went back to our project and updated every occurrence of the location\nof this file in our `.gitlab-ci.yml` file. We modified the two locations\nwhere this happened, one in the `build` job and one in the `containerize`\njob, as follows:\n\n\n```\n\nsrc/main/docker/Dockerfile.native\n\n```\n\n\nto\n\n\n```\n\nDockerfile.native\n\n```\n\n\nWe committed our updates to the `main` branch and this time our entire\npipeline executed successfully!\n\n\n![pipeline-successful-run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/pipeline-successful-run.png)\n\n\nOur last step was to check the running application in the `production`\nenvironment in our Kubernetes cluster.\n\n\n## Accessing the deployed application running in cluster\n\n\nOnce the pipeline ran successfully to completion, we drilled in the log file\nfor the `deploy` job. Remember, this job printed the URL of the application\nat the end of its execution. 
We scrolled down to the bottom of the log and\nclicked on the `https` application link, which opened a browser window\nwarning us that the connection was not private (we disabled `https` for the\nenvironment URL but forgot it for this string). We proceeded past the\nbrowser warning and then the string \"Hello World\" was displayed in the\nbrowser window indicating that the application was up and running in the\nKubernetes cluster.\n\n\nFinally, to double-check our production deployment URL, we headed to the\nproject **Operate > Environments** window, and clicked on the \"Open\" button\nfor it, which immediately opened a browser window with the \"Hello World\"\nmessage.\n\n\n![app-running-on-k8s](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/app-running-on-k8s.png)\n\n\n## Try it \n\n\nWe created, compiled, built, and deployed a simple Quarkus application to a\nKubernetes cluster using [GitLab Duo](https://about.gitlab.com/gitlab-duo/).\nThis approach allowed us to be more efficient and productive in all the\ntasks that we performed and it helped us streamline our DevSecOps processes.\nWe have shown only a small portion of how GitLab Duo's AI-powered\ncapabilities can help you, namely Chat and Root Cause Analysis. 
There’s so\nmuch more you can leverage in GitLab Duo to help you create better software\nfaster and more securely.\n\n\nWatch this whole use case in action:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/xDpycxz3RPY?si=HHZrFt1O_8XoLATf\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\nAll the project assets we used are available\n[here](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/quarkusn/quarkus-native).\n\n\n> [Try GitLab Duo for free](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro)\nand get started on exciting projects like this.\n",[9,737,716,2477,717,738,109],{"slug":3208,"featured":91,"template":693},"use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project","content:en-us:blog:use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project.yml","Use Gitlab Duo To Build And Deploy A Simple Quarkus Native Project","en-us/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project.yml","en-us/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project",{"_path":3214,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3215,"content":3221,"config":3226,"_id":3228,"_type":14,"title":3229,"_source":16,"_file":3230,"_stem":3231,"_extension":19},"/en-us/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance",{"title":3216,"description":3217,"ogTitle":3216,"ogDescription":3217,"noIndex":6,"ogImage":3218,"ogUrl":3219,"ogSiteName":706,"ogType":707,"canonicalUrls":3219,"schema":3220},"Use GitLab Duo Workflow to improve application quality assurance","Learn step-by-step how to add unit tests to a Java application using agentic AI (includes a video 
tutorial).","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097617/Blog/Hero%20Images/Blog/Hero%20Images/Workflow%201800x945_2gQoQIbY9NvjLFpXtsxtXy_1750097616649.png","https://about.gitlab.com/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use GitLab Duo Workflow to improve application quality assurance\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-04-10\",\n      }",{"title":3216,"description":3217,"authors":3222,"heroImage":3218,"date":3223,"body":3224,"category":10,"tags":3225},[733],"2025-04-10","Assuring the quality of your applications via test-driven design, good code\ncoverage, and issue detection is critically important to your customers and\nyour reputation, but it can also be a time-consuming endeavor. [GitLab Duo\nWorkflow](https://about.gitlab.com/gitlab-duo/agent-platform/), agentic AI built\non top of the most comprehensive DevSecOps platform, can help you quickly\ncomplete development tasks such as adding unit tests to a Java application.\nThis tutorial demonstrates how by using this sample [Java\nproject](https://gitlab.com/gitlab-da/playground/csaavedra/gdw/prodmgr-gdw).\n\n\n> GitLab Duo Workflow is currently in private beta. Join the\n[waitlist](https://about.gitlab.com/gitlab-duo/agent-platform/) to see what’s\npossible with AI agents that understand your entire SDLC.\n\n\n## Opening your project in VS Code\n\n\n1. Open the Java project in Visual Studio Code (after cloning it to your\nlocal machine). Ensure that you’re in a feature branch (not the main or\ndefault branch) before you start. If you’re already working on a merge\nrequest, it will have its own associated feature branch.\n\n\n2. (This step is optional.) 
Navigate to the file that defines the Java class\nfor which you’d like to have GitLab Duo Workflow create unit tests. Inspect\nit so that you can later confirm that the generated unit tests do cover its\nclass members. This is what you would see:\n\n\n![File that defines the Java class for which you’d like to have GitLab Duo\nWorkflow create unit\ntests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097627/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097627482.png)\n\n\n**Note:** We are assuming that you already enabled the GitLab Duo Workflow\nextension in your VS Code. If not, please refer to the [setup\ndocumentation](https://docs.gitlab.com/user/duo_workflow/#use-workflow-in-vs-code).\n\n\n3. Launch GitLab Duo Workflow by opening the VS Code command palette [Ctrl +\nShift + P] and entering \"GitLab Duo Workflow\" in it and selecting **GitLab:\nShow Duo Workflow**. A tab will appear that looks like this:\n\n\n![Launching GitLab Duo Workflow with VS\nCode](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097628/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097627483.png)\n\n\n4. The next step is to add tests for the default constructor, the\nverification of the object creation, and the initial state of the properties\nof the Product class. To accomplish this, enter the following prompt in the\ntext area in GitLab Duo Workflow:\n\n\n```unset\n\nCreate unit tests for class defined in the Product.java file and store the\nunit tests in its own file titled ProductTest.java\n\n```\n\n\n![Prompt area in GitLab Duo\nWorkflow](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097628/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097627484.png)\n\n\n5. Click the **Start** button in the GitLab Duo Workflow window. 
Two new\nwindows will appear: one in the center of the screen and one to the right.\nThe one on the right displays the analysis that GitLab Duo Workflow is\nperforming to come up with a plan that will achieve the goal as specified in\nyour prompt. The plan is displayed in the center window. After the analysis\nand the plan are finished, you should see an output like this:\n\n\n![Analysis and plan generated by GitLab Duo\nWorkflow](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097627/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097627486.png)\n\n\n6. Review the analysis and plan and, if you are satisfied with them, click\n**Approve plan** at the bottom of the window.\n\n\n7. GitLab Duo Workflow will start executing the approved plan and making\nmodifications to your project accordingly.\n\n\n8. Once the execution of the plan is finished, you will see a new directory\n`src/test/java/csaa/jspring/ProductManager` in the project with a new file\nin it named `ProductTest.java`, which contains all the unit tests for the\n`Product.java` class.\n\n\n![New directory in the project with a new file named\n`ProductTest.java`](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097628/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750097627488.png)\n\n\n9. Navigate to the newly created file `ProductTest.java` and you will see\nthat it has some import statements underlined in red indicating some import\nerrors:\n\n\n![`ProductTest.java` includes import statements and error indicators in\nred](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097628/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097627489.png)\n\n\nLet’s have GitLab Duo Workflow fix these for us.\n\n\n**Note:** We could have also asked GitLab Duo Workflow in our first prompt\nto update the `pom.xml` file accordingly. 
But since we didn’t, let’s fix\nthese errors in a new workflow.\n\n\n## Launching a GitLab Duo Workflow to fix errors in generated code\n\n\n10. Start a new workflow by clicking on the **New workflow** button at the\nbottom of the analysis window on the right side of your screen.\n\n\n![New workflow\nbutton](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097628/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097627491.png)\n\n\n11. In the prompt text area, enter the following:\n\n\n```unset\n\nThe file ProductTest.java has an error “The import org.junit cannot be\nresolved”. Please fix it\n\n```\n\n\n12. After you approve the proposed plan, GitLab Duo Workflow starts its\nanalysis by reading the current `pom.xml` file. It then edits it and removes\nthe outdated JUnit dependency, and follows that with the addition of the\ncorrect dependency and version for JUnit. Lastly, it reads the\n`ProductTest.java` file to clear all the dependency errors.\n\n\n![GitLab Duo Workflow carrying out analysis by reading\npom.xml](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097627/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097627492.png)\n\n\n## Watch the tutorial\n\n\nThrough the execution of this plan, GitLab Duo Workflow is effectively\nmaking updates to the project to achieve what was requested in the prompt,\nsaving time and effort, and increasing productivity so that developers can\nspend more time innovating and creating value for their organization.\n\n\nIf you’d like to see what you read above in action, watch the following\nvideo:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Tuj7TgqY81Q?si=RReuL1pUsLafvAzs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n> Sign up for the [GitLab Duo Workflow private beta\nwaitlist](https://about.gitlab.com/gitlab-duo/agent-platform/) 
to see what’s\npossible with AI agents that understand your entire SDLC.\n\n\n## Read more about GitLab Duo Workflow and agentic AI\n\n\n- [GitLab Duo Workflow: Enterprise visibility and control for agentic\nAI](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/)\n\n- [GitLab Duo Workflow\ndocumentation](https://docs.gitlab.com/user/duo_workflow/)\n\n- [GitLab Duo](https://about.gitlab.com/gitlab-duo/)\n\n- [Agentic AI: Unlocking developer potential at scale (The\nSource)](https://about.gitlab.com/the-source/ai/agentic-ai-unlocking-developer-potential-at-scale/)\n",[9,496,737,738,717],{"slug":3227,"featured":6,"template":693},"use-gitlab-duo-workflow-to-improve-application-quality-assurance","content:en-us:blog:use-gitlab-duo-workflow-to-improve-application-quality-assurance.yml","Use Gitlab Duo Workflow To Improve Application Quality Assurance","en-us/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance.yml","en-us/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance",{"_path":3233,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3234,"content":3238,"config":3244,"_id":3246,"_type":14,"title":3247,"_source":16,"_file":3248,"_stem":3249,"_extension":19},"/en-us/blog/vibe-coding-with-gitlab-duo-agent-platform-issue-to-mr-flow",{"config":3235,"title":3236,"description":3237},{"noIndex":6},"Vibe coding with GitLab Duo Agent Platform: 'Issue to MR' Flow","Learn how to update your application in minutes with our newest agent Flow that takes developers from idea to code.",{"title":3239,"description":3237,"authors":3240,"heroImage":949,"date":3241,"body":3242,"category":10,"tags":3243},"Vibe coding with GitLab Duo Agent Platform: Issue to MR Flow",[733],"2025-09-03","[GitLab Duo Agent Platform](https://about.gitlab.com/gitlab-duo/agent-platform/) (available now in Beta) provides a framework for AI agents to interact with GitLab resources like issues and merge requests, enabling complex, 
multistep tasks from concept to completion. Agent Platform offers conversational ([agentic chat](https://about.gitlab.com/blog/gitlab-duo-chat-gets-agentic-ai-makeover/)) and automated ([agent Flows](https://about.gitlab.com/blog/gitlab-18-3-expanding-ai-orchestration-in-software-engineering/)) experiences to assist with code generation, modernization, security vulnerability resolution, and project analysis — all while providing enterprise-grade security and customizable controls.\n\n\"Issue to MR\" is an agent Flow that streamlines turning a well-scoped issue into a draft merge request (MR). The Flow analyzes an issue’s description and requirements, opens a draft MR linked to the issue, creates a development plan, and proposes an implementation — right from the GitLab UI.\n\n## The developer challenge\n\nProduct tweaks such as rearranging a UI layout, adjusting component sizing, or making a minor workflow change shouldn't require hours of setup work. Yet developers find themselves caught in a frustrating cycle: hunting through codebases to locate the right files, creating branches, piecing together scattered changes across multiple components, and navigating complex review processes. And this is all before they can even see if their solution works. Development overhead transforms what should be quick iterations into time-consuming tasks, slowing down feedback loops and making simple product improvements feel like major undertakings.\n\n## How to use the Issue to MR Flow to accelerate an application update\n\nYou first need to fulfill these prerequisites before using the Issue to MR Flow.\n\nPrerequisites:\n\n* An existing issue with clear requirements and acceptance criteria. This will help GitLab Duo Agent Platform better understand what you're trying to achieve and improve the quality of your output.  \n* Project access at Developer or higher permissions as the Flow will be making edits to your project's source code.  
\n* GitLab Duo Agent Platform enabled for your group or project, with Flows allowed. For this, go to your project’s **Settings > General > GitLab Duo > Allow flow execution** toggle and enable it. GitLab is committed to helping provide guardrails, so agentic AI features require turning on these toggles to protect sensitive projects and ensure only the projects you want are accessible to GitLab Duo Agent Platform.\n\nOnce you have fulfilled all the prerequisites above, you can follow these steps to take advantage of the Issue to MR Flow:\n\n1. Create a project issue that describes what you’d like GitLab Duo Agent Platform to accomplish for you. Provide as much detail as possible in the issue description. If the issue already exists, open it by going to **Plan > Issues** and clicking on the issue describing the update you want. Keep the issue well-scoped and specific.\n\n2. Below the issue header, click on **Generate MR with Duo** to kick off the Flow.\n\n3. If you’d like to track the progress of the agents working on implementing your issue, go to **Automate > Agent sessions** to see the live session log as agents plan and propose changes.\n\n4. When the pipeline completes, a link to the MR appears in the issue’s activity. Open it to review the summary and file-level changes.\n\n5. If you’d like to validate the proposed updates by GitLab Duo Agent Platform locally, you can pull the branch on your laptop, build and run your app, and verify that the update behaves as expected. If needed, make edits in the MR and proceed with normal review.\n\n6. If you’re happy with all the proposed application updates, merge the MR to the main branch.\n\n## Why the Issue to MR Flow works well for application changes\n\nThe Issue to MR Flow proposes code changes and updates the MR directly, so you spend less time locating files and only have to evaluate and review the result. 
In addition, the MR is automatically linked to the originating issue, keeping context tight for reviewers and stakeholders. Finally, you can monitor the agent session to understand what’s happening at each step.\n\n## Benefits of GitLab Duo Agent Platform\n\nGitLab Duo Agent Platform is [an agentic orchestration layer](https://about.gitlab.com/blog/gitlab-duo-agent-platform-public-beta/) that brings **full project context**, including planning to coding, building, securing, deploying, and monitoring, so agents can help across the entire software development lifecycle (SDLC), not just code editing.\n\n* Unified data model: GitLab Duo Agents operate on GitLab’s unified SDLC data, enabling higher-quality decisions and collaboration across tasks — including non-coding ones.\n\n* Security and compliance are built-in: GitLab Duo Agents run within enterprise guardrails and are usable even in highly-regulated or offline/air-gapped environments.\n\n* Interoperability and extensibility: Orchestrate flows across vendors and tools; connect external data via [MCP](https://about.gitlab.com/topics/ai/model-context-protocol/)/A2A for richer context.\n\n* Scale collaboration: GitLab Duo Agents work in the GitLab UI and IDEs, enabling many-to-many human-agent collaboration.\n\n* Discoverable and shareable: Find and share agents and Flows in a centralized AI Catalog.\n\n## Try the Issue to MR Flow today\n\nFor application updates, like a modest UI adjustment, the Issue to MR Flow helps you move from a clear issue to a reviewable MR quickly, with progress you can monitor and changes you can validate and merge through your standard workflow. 
It keeps context, reduces handoffs, and lets your team focus on quality rather than busywork.\n\nWatch the Issue to MR Flow in action:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/BrrMHN4gXF4?si=J7beTgWOLxvS4hOw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n> Try the Issue to MR Flow on GitLab Duo Agent Platform now with a [free trial of GitLab Ultimate with Duo Enterprise](https://about.gitlab.com/free-trial/).\n\n\n",[9,738,717,737],{"featured":91,"template":693,"slug":3245},"vibe-coding-with-gitlab-duo-agent-platform-issue-to-mr-flow","content:en-us:blog:vibe-coding-with-gitlab-duo-agent-platform-issue-to-mr-flow.yml","Vibe Coding With Gitlab Duo Agent Platform Issue To Mr Flow","en-us/blog/vibe-coding-with-gitlab-duo-agent-platform-issue-to-mr-flow.yml","en-us/blog/vibe-coding-with-gitlab-duo-agent-platform-issue-to-mr-flow",{"_path":3251,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3252,"content":3258,"config":3262,"_id":3264,"_type":14,"title":3265,"_source":16,"_file":3266,"_stem":3267,"_extension":19},"/en-us/blog/what-is-a-large-language-model-llm",{"title":3253,"description":3254,"ogTitle":3253,"ogDescription":3254,"noIndex":6,"ogImage":3255,"ogUrl":3256,"ogSiteName":706,"ogType":707,"canonicalUrls":3256,"schema":3257},"What is a large language model (LLM)?","Learn how large language models work, their applications, and their impact on the DevSecOps world.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660057/Blog/Hero%20Images/LLM.jpg","https://about.gitlab.com/blog/what-is-a-large-language-model-llm","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What is a large language model (LLM)?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2025-05-29\",\n    
  }",{"title":3253,"description":3254,"authors":3259,"heroImage":3255,"date":1779,"body":3260,"category":10,"tags":3261},[774],"Large language models (LLMs) are revolutionizing DevOps and DevSecOps approaches by simplifying complex tasks, such as code creation, log analysis, and vulnerability detection.\n\nIn this article, you will learn how LLMs work, their practical applications, and the main challenges to overcome in order to fully harness their potential.\n\n## What is an LLM?\n\nLLMs are artificial intelligence (AI) systems that can process and generate text autonomously. They are trained by analyzing vast amounts of data from a variety of sources, enabling them to master the linguistic structures, contextual relationships, and nuances of language.\n\nLLMs are a major breakthrough in the field of AI. Their ability to process, generate, and interpret text relies on sophisticated machine learning and natural language processing (NLP) techniques. These systems do not just process individual words; they analyze complex sequences to capture the overall meaning, subtle contexts, and linguistic nuances.\n\n## How do LLMs work?\n\nTo better understand how they work, let's explore some of the key features of large language models.\n\n### Supervised and unsupervised learning\n\nLLMs are trained using two complementary approaches: supervised learning and unsupervised learning. These two approaches to machine learning maximize their ability to analyze and generate text.\n\n* **Supervised learning** relies on labeled data, where each input is associated with an expected output. The model learns to associate these inputs with the correct outputs by adjusting its internal parameters to reduce prediction errors. Through this approach, the model acquires precise knowledge about specific tasks, such as text classification or named entity recognition.\n\n* **Unsupervised learning (or machine learning)**, on the other hand, does not require labeled data. 
The model explores large volumes of text to discover hidden structures and identify semantic relationships. The model is therefore able to learn recurring patterns, implicit grammatical rules in the text, and contextualization of sentences and concepts. This method allows LLMs to be trained on large corpora of data, greatly accelerating their progress without direct human action.\n\nBy combining these two approaches, large language models gain the advantages of both precise, human-guided learning and unlimited autonomous exploration. This complementarity allows them to develop rapidly, while continuously improving their ability to understand and generate text coherently and contextually.\n\n### Learning based on a large volume of data\n\nLLMs are trained on billions of sentences from a variety of sources, such as news articles, online forums, technical documentation, scientific studies, and more. This variety of sources allows them to acquire a broad and nuanced understanding of natural language, ranging from everyday expressions to specialized terminology.\n\nThe richness of the data used is a key factor in LLMs' performance. Each source brings different writing styles, cultural contexts, and levels of technicality. \n\nFor example:\n\n* **News articles** to master informative and factual language \n* **Online forums** to understand specialized communities' informal conversations and technical language  \n* **Technical documentation and scientific studies** to assimilate complex concepts and specific terminology, particularly in areas such as DevOps and DevSecOps\n\nThis diversity of content allows LLMs to recognize complex linguistic structures, interpret sentences in different contexts, and adapt to highly technical domains. 
In DevSecOps, this means understanding commands, configurations, security protocols, and even concepts related to the development and maintenance of computer systems.\n\nWith this large-scale training, LLMs can accurately answer complex questions, write technical documentation, or identify vulnerabilities in computer systems.\n\n### Neural network architecture and \"deep learning\"\n\nLLMs are based on advanced neural network architectures. These networks are specially designed to process large sequences of text while maintaining an accurate understanding of the context. This deep learning-based training is a major asset in the field of NLP.\n\nThe best-known of these structures is the architecture of sequence-to-sequence models (transformers). This architecture has revolutionized NLP with its ability to simultaneously analyze all parts of a text, unlike sequential approaches that process words one by one.\n\nSequence-to-sequence models excel at processing long texts. For example, in a conversation or a detailed technical document, they are able to link distant information in the text to produce precise and well-reasoned answers. This context management is essential in a DevSecOps approach, where instructions can be complex and spread over multiple lines of code or configuration steps.\n\n### Predictive text generation\n\nWhen the user submits a text, query, or question, an LLM uses its predictive ability to generate the most likely sequence, based on the context provided.\n\nThe model analyzes each word, studies grammatical and semantic relationships, and then selects the most suitable terms to produce a coherent and informative text. 
This approach makes it possible to generate precise, detailed responses adapted to the expected tone.\n\nIn DevSecOps environments, this capability becomes particularly useful for:\n\n* **Coding assistance:** generation of code blocks or scripts adapted to specific configurations\n* **Technical problem solving:** proposing solutions based on descriptions of bugs or errors\n* **Drafting technical documentation:** automatic creation of guides, manuals, or instructions\n\nPredictive text generation thus makes it possible to automate many repetitive tasks and speed up technical teams' work.\n\n## Applications of large language models in a DevSecOps approach\n\nWith the rise of automation, LLMs have become indispensable allies for technical teams. Their ability to understand and generate text contextually enables them to effectively operate in complex environments such as [DevSecOps](https://about.gitlab.com/topics/devsecops/).\n\nWith their analytical power and ability to adapt to specific needs, these models offer tailored solutions to streamline processes and lighten technical teams' workload.\n\nDevelopment teams can leverage LLMs to automatically transform functional specifications into source code. \n\nWith this capability, they can perform the following actions:\n- generate complex automation scripts\n- create CI/CD pipelines tailored to specific business processes\n- produce customized security patches\n- generate code explanation and create documentation\n- refactor code by improving code structure and readability without changing functionality\n- generate tests\n\nBy relying on LLMs, teams are able to accelerate the development of their software while reducing the risk of human error.\n\n### Improved documentation and knowledge sharing\n\nThese powerful tools make it easy to create customized user manuals, API descriptions, and tutorials that are perfectly tailored to each user's level of expertise. 
By leveraging existing knowledge bases, LLMs create contextual answers to frequently asked questions. This enhances knowledge sharing within teams, speeds up onboarding of new members, and helps centralize best practices.\n\n### Incident management and troubleshooting\nDuring an incident, LLMs play a crucial role in analyzing logs and [trace files](https://docs.gitlab.com/ee/development/tracing.html) in real time. Thanks to their ability to cross-reference information from multiple sources, they identify anomalies and propose solutions based on similar past incidents. This approach significantly reduces diagnosis time. In addition, LLMs can automate the creation of detailed incident reports and recommend specific corrective actions.\n\n### Creating and improving CI/CD pipelines\n\nLLMs are revolutionizing the configuration of [CI/CD pipelines](https://about.gitlab.com/topics/ci-cd/cicd-pipeline/). They can not only help create pipelines, but also automate this process and suggest optimal configurations based on industry standards. By adapting workflows to your specific needs, they ensure perfect consistency between different development environments. Automated testing is enhanced by relevant suggestions, limiting the risk of failure. LLMs also continuously monitor the efficiency of pipelines and adjust processes to ensure smooth and uninterrupted rollout.\n\n### Security and compliance\n\nIn a DevSecOps environment, large language models become valuable allies for security and compliance. They parse the source code for potential vulnerabilities and generate detailed patch recommendations. LLMs can also monitor the application of security standards in real time, produce comprehensive compliance reports, and automate the application of security patches as soon as a vulnerability is identified. 
This automation enhances overall security and ensures consistent compliance with legal and industry requirements.\n\n## What are the benefits of large language models?\n\nLLMs are radically reshaping DevOps and DevSecOps approaches, bringing substantial improvements in productivity, security, and software quality. By integrating with existing workflows, LLMs are disrupting traditional approaches by automating complex tasks and providing innovative solutions.\n\n### Improved productivity and efficiency\n\nLLMs play a central role in improving technical teams' productivity and efficiency. By automating a wide range of repetitive tasks, they free development teams from routine operations, allowing them to focus on strategic activities with higher added value.\n\nIn addition, LLMs act as intelligent technical assistants capable of instantly providing relevant code snippets, tailored to the specific context of each project. In this way, they significantly reduce research time by offering ready-to-use solutions to assist teams in their work. This targeted assistance speeds up problem solving and reduces disruptions in workflows.\nAs a result, productivity increases and projects move forward more quickly. Technical teams can take on more tasks without compromising the quality of deliverables.\n\n### Improved code quality and security\n\nThe use of large language models in software development is a major lever for improving both code quality and application security. With their advanced analytical capabilities, LLMs can scan source code line by line and instantly detect syntax errors, logical inconsistencies, and potential vulnerabilities. Their ability to recognize defective code allows them to recommend appropriate fixes that comply with industry best practices.\n\nLLMs also play a key preventive role. They excel at identifying complex security flaws that are often difficult for humans to detect. 
By analyzing dependencies, they can flag obsolete or vulnerable libraries and recommend more secure, up-to-date versions. This approach contributes to maintaining a secure environment that complies with current security standards.\n\nBeyond fixing existing errors, LLMs offer improvements by suggesting optimized coding practices and project structures. They can generate code that meets the most advanced security standards from the earliest stages of development.\n\n### Accelerating development lifecycles\n\nLarge language models play a key role in accelerating software development lifecycles by automating key tasks that would otherwise tie up valuable human resources. Complex and repetitive tasks, such as writing functions, creating unit tests, or implementing standard components, are automated in a matter of moments.\n\nLLMs also speed up the validation phase with their ability to suggest complete and appropriate test cases. They ensure broader test coverage in less time, reducing the risk of errors and enabling early detection of anomalies. This preventive approach shortens the correction cycle and limits delays related to code quality issues.\n\nBy simplifying technical tasks and providing fast and tailored solutions, large language models enable businesses to respond to market demands in a more agile way. This acceleration of the development lifecycle results in more frequent updates, faster iterations, and a better ability to adapt products to users' changing needs.\n\nDevelopment lifecycles are becoming shorter, providing a critical strategic advantage in an increasingly demanding technology landscape.\n\n## What are the challenges of using LLMs?\n\nDespite their many benefits, large language models have certain limitations that require careful management. Their effectiveness depends heavily on the quality of the data used during their training and regular updates to their knowledge bases. 
In addition, issues related to algorithmic bias, data security, and privacy can arise, exposing companies to operational and legal risks. Rigorous human oversight remains essential in order to ensure the reliability of results, maintain regulatory compliance, and prevent critical errors.\n\n### Data privacy and security\n\nTraining LLMs relies on large volumes of data, often from diverse sources, raising questions about the protection of confidential information. Sensitive data shared with cloud platforms can therefore be exposed to potential breaches. This is of particular concern to companies operating in regulated sectors. \n\nIn Europe, where strict regulations like GDPR govern data management, many companies are reluctant to transfer their information to external services. Regulatory requirements, coupled with the fear of unauthorized exploitation of sensitive data, have led some companies to opt for self-hosted solutions to maintain complete control over their systems.\n\nProviders like GitLab have put in place robust security guarantees, such as intentional non-retention of personal data and end-to-end encryption. However, this may not be enough for the most demanding customers, who prefer complete control of their environments. Implementing hybrid or on-premises solutions then becomes a strategic necessity to meet the security requirements of certain companies.\n\nLearn more about GitLab Duo Self-Hosted by clicking on the image below to access our product tour.\n\n[![GitLab Duo Self-Hosted tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673815/Blog/Content%20Images/Screenshot_2025-05-29_at_8.29.30%C3%A2__AM.png)](https://gitlab.navattic.com/gitlab-duo-self-hosted)\n\n### Accuracy and reliability\n\nAlthough large language models are capable of producing impressive results, their performance is not infallible. They can produce incorrect, incomplete, or inconsistent answers. 
This inaccuracy becomes particularly problematic in the context of critical tasks such as generating security code or analyzing sensitive data.\n\nIn addition, LLMs operate on the basis of probabilistic models, which means that they do not truly \"understand\" the content they process, but produce predictions based on statistical probabilities. This can lead to technically incorrect or even dangerous recommendations when used without human validation.\n\nTo avoid these pitfalls, it is essential to maintain constant oversight and establish rigorous validation processes. The results provided by LLMs must always be reviewed by humans before being integrated into critical systems.\n\nA strategy of regular model updates, combined with proactive human oversight, can reduce errors and gradually improve the reliability of results.\n\n## How GitLab uses LLMs for GitLab Duo features\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/) harnesses the power of large language models to transform DevSecOps processes by integrating AI-powered capabilities throughout the software development lifecycle. This approach aims to improve productivity, strengthen security, and automate complex tasks so that development teams can focus on high added-value tasks.\n\n### AI-assisted software development\n\nGitLab Duo provides continuous support throughout the software development lifecycle with real-time recommendations. Development teams can automatically generate unit tests, get detailed explanations of complex code segments, and benefit from suggestions to improve the quality of their code.\n\n### Proactive CI/CD failure analysis\n\nOne of the key features of GitLab Duo is its assistance in analyzing CI/CD job failures. With LLM and AI, teams are able to quickly identify sources of errors in their continuous integration and deployment pipelines. \n\n### Enhanced code security\n\nGitLab Duo incorporates AI-based security features. 
The system detects vulnerabilities in the source code and proposes detailed patches to reduce the risks. Teams receive clear explanations of the nature of the vulnerabilities identified and can apply automated patches via [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/) generated directly by GitLab Duo. This feature helps secure development without slowing down development lifecycles.\n\nLearn more about GitLab Duo Vulnerability Explanation and Resolution by clicking on the image below to access our product tour.\n\n[![Vulnerability report interactive tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673816/Blog/Content%20Images/Screenshot_2025-05-29_at_8.32.15%C3%A2__AM.png)](https://gitlab.navattic.com/ve-vr-short)\n\n### Key features of GitLab Duo\n\n* [GitLab Duo Chat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/): This conversational feature processes and generates text and code intuitively. It allows users to quickly search for relevant information in large volumes of text, including in tickets, [epics](https://docs.gitlab.com/ee/user/group/epics/), source code, and [GitLab documentation](https://docs.gitlab.com/).\n\n* [GitLab Duo Self-Hosted](https://about.gitlab.com/blog/gitlab-duo-self-hosted-enterprise-ai-built-for-data-privacy/): GitLab Duo Self-Hosted allows companies with strict data privacy requirements to benefit from GitLab Duo's AI capabilities with flexibility in choosing deployment and LLMs from a list of supported options.\n\n* [GitLab Duo Code Suggestions](https://about.gitlab.com/direction/create/code_creation/code_suggestions/): Development teams benefit from automated code suggestions, allowing them to write secure code faster. Repetitive and routine coding tasks are automated, significantly speeding up software development lifecycles.\n\nGitLab Duo is not limited to these features. 
It offers a wide range of features designed to simplify and optimize software development. Whether it's automating testing, improving collaboration between teams, or strengthening project security, GitLab Duo is a complete solution for smart and efficient DevSecOps processes.\n\nLearn more about GitLab Duo Enterprise by clicking on the image below to access our product tour. \n\n[![GitLab Duo Enterprise interactive tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673816/Blog/Content%20Images/Screenshot_2025-05-29_at_8.33.40%C3%A2__AM.png)](https://gitlab.navattic.com/duo-enterprise)",[9,716],{"slug":3263,"featured":6,"template":693},"what-is-a-large-language-model-llm","content:en-us:blog:what-is-a-large-language-model-llm.yml","What Is A Large Language Model Llm","en-us/blog/what-is-a-large-language-model-llm.yml","en-us/blog/what-is-a-large-language-model-llm",{"_path":3269,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3270,"content":3275,"config":3280,"_id":3282,"_type":14,"title":3283,"_source":16,"_file":3284,"_stem":3285,"_extension":19},"/en-us/blog/what-the-ml-ai",{"title":3271,"description":3272,"ogTitle":3271,"ogDescription":3272,"noIndex":6,"ogImage":868,"ogUrl":3273,"ogSiteName":706,"ogType":707,"canonicalUrls":3273,"schema":3274},"What the ML is up with DevSecOps and AI?","AI will revolutionize DevSecOps platforms. 
Learn about where GitLab is today and what we're working on.","https://about.gitlab.com/blog/what-the-ml-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What the ML is up with DevSecOps and AI?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2023-03-16\",\n      }",{"title":3271,"description":3272,"authors":3276,"heroImage":868,"date":3277,"body":3278,"category":10,"tags":3279},[1455],"2023-03-16","\n\nGitLab believes at our core that [AI will revolutionize the power of DevSecOps platforms](//topics/devops/the-role-of-ai-in-devops/) to bring to life a software development experience that feels straight out of science fiction. GitLab users already benefit from a step-function increase in productivity when they adopt our platform: streamlined collaboration, operational efficiencies, and massive acceleration in time to delivery. But by introducing machine learning (ML) and other artificial intelligence (AI) capabilities into the fabric of The DevSecOps Platform feature set, we aim to take those gains to a whole new level.\n\nGitLab occupies a unique seat in relation to defining how AI and ML will impact DevSecOps into the future. As the creators of the DevSecOps platform category, we are the founders behind a successful philosophy for bringing DevSecOps principles into practice. By virtue of curating the entire software development lifecycle, our platform also has an unrivaled level of visibility into the code, configuration, testing, deployment, and operation of the applications it produces. It is in the rich data set underpinning that curated experience where unbounded opportunity lurks. 
These opportunities include the ability to deliver:\n\n- **Faster deployments**: By automating various aspects of the software development lifecycle, including testing and deployment, [AI/ML can help DevSecOps](/blog/why-ai-in-devops-is-here-to-stay/) teams deliver software faster and more reliably.\n- **Improved security**: AI/ML can help identify and mitigate potential security threats by analyzing data patterns and behavior. It can also automate security testing and analysis, leading to faster and more accurate detection and remediation of vulnerabilities.\n- **Enhanced quality assurance**: AI/ML can help automate quality assurance processes by analyzing data patterns and identifying potential issues in code, leading to faster testing, fewer bugs, and higher quality software.\n- **Intelligent monitoring and alerting**: AI/ML can help monitor systems in real time, analyzing data from logs, alerts, and other sources to detect anomalous behavior and potential security threats.\n- **Predictive analytics**: AI/ML can help DevSecOps teams predict potential issues, identify patterns, and make data-driven decisions to improve their software before issues become critical.\n\nGitLab will focus on incorporating AI/ML capabilities that leverage our unique strengths to deliver unique value. 
In particular, we plan to:\n\n- incorporate generative AI into GitLab to massively simplify, accelerate, or entirely obviate parts of the software development process,\n- use the unique data set at our disposal to make novel connections and surface insights to users about their teams, their processes, and the software they are building - insights that are otherwise lost to the voids of piecemeal DevOps toolchains.\n- build the MLOps and DataOps plumbing that will enable organizations to build and deploy AI/ML workloads using GitLab.\n\n## AI/ML in GitLab today\n\nHere are some of the ways we are using AI/ML in GitLab today.\n\n### AI/ML for automation\n\nFirst, we are applying ML and AI to automate mundane tasks and reduce the cognitive load for our customers. We are currently developing [AI Assisted capabilities](/direction/modelops/ai_assisted/) to improve productivity and efficiency for everyone in the software delivery workflow. Here are some AI Assisted capabilities available in GitLab today:\n\n![Suggested Review Screenshot](https://about.gitlab.com/images/15_4/create-code-review-suggested-reviewers.png){: .shadow.col-sm-4.right.wrap-text}\n\n- **Suggested Reviewers**, [released last September](/releases/2022/09/22/gitlab-15-4-released/#suggested-reviewers-open-beta), automatically suggests the best available reviewer for a merge request. This capability removes the guesswork by ensuring the right reviewer with the right contextual knowledge is reviewing code changes so that developers can deploy software more efficiently. Early users have told us that Suggested Reviewers minimizes delays and leads to better reviews. They now have more confidence in the code they deploy. The tool has generated tens of thousands of suggested reviewers to more efficiently and securely review code on our platform. 
You can learn more about the feature and how to enable it in [our Suggested Reviewers documentation](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/#suggested-reviewers).\n\n- **GitLab Code Suggestions**, [released in closed beta this past February](/releases/2023/02/22/gitlab-15-9-released/#code-suggestions-available-in-closed-beta), aims to increase developer speed and productivity by providing code suggestions in GitLab’s VS Code IDE plugin. We’re actively building this into GitLab’s new Web IDE and Remote Development solution as well. Ultimate customers interested in joining the beta of Code Suggestions [can fill out this form](https://forms.gle/cbjqJhLGV1i7t6Sd8). Additional information about Code Suggestions can be found on [our direction page](/direction/modelops/ai_assisted/code_suggestions/).\n\n![Animated gif image of code suggestions](https://about.gitlab.com/images/15_9/DemoFastApi.gif){: .shadow}\n\n### Protecting customer source code\n\nWe’ve heard from many of our enterprise DevSecOps customers that their organizations care deeply about the privacy of their source code. They understandably want control over who processes their data and if their code is used to train code generation AI models. That’s why we’ve built our code suggestions feature to work natively within GitLab. 
Customer source code does not leave the GitLab instance and it is not used to retrain generic multi-customer code generation models.\n\n## The road ahead for GitLab and AI\n\nWe plan to add many AI capabilities throughout our DevSecOps platform, including:\n\n- automating mundane tasks across the software development lifecycle with [Workflow Automation](/direction/ai-powered/) including assigning, labeling, and summarizing.\n- reducing the risk due to insecure coding practices by automatically detecting and helping to remediate code quality and security vulnerabilities with [Intelligent Code Security](/direction/ai-powered/).\n- augmenting developers with generative [Code Suggestions](/direction/modelops/ai_assisted/code_suggestions/) while writing, reviewing, and fixing code.\n\nWe also want to make it easier for customers to build and deploy amazing AI/ML-backed applications to their customers faster. We are working to integrate [ModelOps](/direction/modelops/) features into the GitLab DevSecOps Platform to better support data science workloads and extend DevSecOps workflows to AI and ML workloads. This includes:\n\n- enabling data science teams to work seamlessly within the GitLab platform with [better support for Python notebooks](https://docs.gitlab.com/ee/user/project/repository/jupyter_notebooks/) and [GPU runners](https://docs.gitlab.com/runner/configuration/gpus.html).\n- improving handoffs between data science teams and DevSecOps teams with a native [GitLab Model Registry](https://gitlab.com/groups/gitlab-org/-/epics/9423).\n\n## Follow along\n\nThis blog is the first in [an ongoing series](/blog/ai-ml-in-devsecops-series/) about GitLab’s journey to build and integrate ML/AI into our DevSecOps platform. 
Throughout the series, we’ll feature blogs from our product, engineering, and UX teams that will showcase how we’re infusing AI/ML into GitLab.\n\nWe believe AI is going to dramatically change the way teams work and the way organizations develop, secure, and operate software. We’re using our core value of iteration and our experience building the most comprehensive DevSecOps platform to bring the power of AI/ML to bear on the software development lifecycle.\n\nWant to continue reading about AI? Check out the next blog in this series: [How AI-assisted code suggestions will advance DevSecOps](https://about.gitlab.com/blog/ai-assisted-code-suggestions/)!\n\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[9,717],{"slug":3281,"featured":6,"template":693},"what-the-ml-ai","content:en-us:blog:what-the-ml-ai.yml","What The Ml Ai","en-us/blog/what-the-ml-ai.yml","en-us/blog/what-the-ml-ai",{"_path":3287,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3288,"content":3294,"config":3299,"_id":3301,"_type":14,"title":3302,"_source":16,"_file":3303,"_stem":3304,"_extension":19},"/en-us/blog/whats-next-for-devsecops",{"title":3289,"description":3290,"ogTitle":3289,"ogDescription":3290,"noIndex":6,"ogImage":3291,"ogUrl":3292,"ogSiteName":706,"ogType":707,"canonicalUrls":3292,"schema":3293},"GitLab’s 2023 predictions: What’s next for DevSecOps?","Check out insights on securing the supply chain, new uses for AI/ML, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663820/Blog/Hero%20Images/prediction.jpg","https://about.gitlab.com/blog/whats-next-for-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab’s 2023 predictions: What’s next for DevSecOps?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2023-01-26\"\n      }",{"title":3289,"description":3290,"authors":3295,"heroImage":3291,"date":3296,"body":3297,"category":1201,"tags":3298},[3071],"2023-01-26","\nIn 2023, organizations will focus their time and resources on the continued shift left of security, completing the evolution from DevOps to [DevSecOps](/topics/devsecops/). GitLab Chief Marketing and Strategy Officer [Ashley Kramer](https://gitlab.com/akramer) says that every company will need to have security tightly integrated into DevOps to combat the increased threats throughout the software development lifecycle. 
In addition, DevSecOps teams will have to continue to focus on supply chain security, make optimal use of artificial intelligence and machine learning, and expand their use of value stream analytics. GitLab leaders from across disciplines share these predictions and more about how the industry will change this year.\n\n## Prediction 1: Protecting the supply chain will be the top priority\n\nSecurity will continue to be an organization-wide responsibility, shifting further left and spanning from [the IDE](/blog/get-ready-for-new-gitlab-web-ide/) to applications running in production, according to  [David DeSanto](https://gitlab.com/david), Chief Product Officer.\n\nIn our [2022 Global DevSecOps survey](https://about.gitlab.com/developer-survey/), 57% of sec team members said their orgs have either shifted security left or are planning to this year. Half of security professionals report that developers are failing to identify security issues – to the tune of 75% of vulnerabilities.\n\nThe shift left will be driven in part by the need for [tighter security for software supply chains](/blog/the-ultimate-guide-to-software-supply-chain-security/). “As remote development becomes more and more commonplace, software supply chain security will play a more expansive role across the software development lifecycle,” DeSanto says.\n\n[Francis Ofungwu](https://gitlab.com/fofungwu), Global Field CISO, predicts this supply chain security evolution will happen in three key ways:\n\n- The engineering frontlines will take on more ownership of managing threats in their day-to-day operations. 
In order to accomplish this, developers will need real-time context on vulnerabilities and remediation strategies in each phase of the software development lifecycle (SDLC), consequently reducing the likelihood of painful incidents in production environments.\n\n- Security and compliance teams will invest in transcribing their software assurance expectations into policy-as-code to reduce the manual and time-consuming security review processes that reduce development velocity.\n\n- As a result of headline-grabbing incidents highlighting enterprise risks in modern software development, organizations will build audit programs to better assess and report SDLC risks. This will require organizations to design how to deliver artifacts that prove the immutability of the controls deployed across all aspects of their development toolchain.\n\nOrganizations should also expect that “what have been best practices for supply chain security for many years, will now become regulatory requirements,” says [Corey Oas](https://gitlab.com/corey-oas), Manager, Security Compliance (Dedicated Markets). He points to [artifact attestation and software bill of materials (SBOM) generation](/blog/the-ultimate-guide-to-sboms/) as examples of best practices that will soon become federal government or industry mandates. “Both of these are integral to developer workflows.”\n\n[Sam White](https://gitlab.com/sam.white), Group Manager, Product - Govern, doubles down on the SBOM and artifact attestation prediction, saying both SBOMs and attestations will need ongoing attention from DevSecOps teams. “Expect to see a shift from looking at these as one-time events to them becoming part of a continuous evaluation process,” he says, adding that organizations will need deeper visibility into software dependencies (e.g. 
open source packages) and more centralization of software build information.\n\nAnother element of software supply chain security is [zero trust](/blog/why-devops-and-zero-trust-go-together/). “Organizations have considered zero trust strategies for a while, and it will be an implementation focus for them going forward,” predicts [Joel Krooswyk](https://gitlab.com/jkrooswyk), GitLab Federal CTO. “One reason for this movement, at least among federal agencies and their suppliers, is the recent release of the Department of Defense zero trust architecture strategy and roadmap and the inclusion of zero trust principles in several National Institute of Standards and Technology publications such as [800-207](https://csrc.nist.gov/publications/detail/sp/800-207/final).”\n\n> Get more public sector predictions with our webcast [“2022 Lookback & 2023 Predictions in Cybersecurity & Zero Trust with GitLab”](https://page.gitlab.com/2022_devsecopsusecase_Lookback_Predictions_PubSec_RegistrationPage.html)\n\n## Prediction 2: Security will burrow deep into DevOps education\n\nTo mirror the transformation of DevOps to DevSecOps, [DevOps training and education](/blog/5-ways-to-bring-devops-to-your-campus/) will include security as a key part of the curricula, White says. “Organizations will have to provide access to the training that developers need to get a baseline security knowledge, including why certain vulnerabilities are important and should be addressed right away,” he says.\n\n[Pj Metz](https://gitlab.com/PjMetz), Education Evangelist, believes 2023 will be the year that “Shift Left principles will show up in university classrooms.”\n\n“Already, the GitLab for Education team has seen more and more requests for information on DevSecOps, and not just in computer science and programming. Information systems students are looking to learn more about DevSecOps as well,” he says. 
”Integrating security education directly into DevOps curricula will ensure that future professionals will be prepared for all aspects of DevSecOps.”\n\nAnd he encourages DevOps students to [ask for security to be added into their education](https://about.gitlab.com/the-source/security/the-future-of-devops-education-needs-to-include-security/) so they will be properly prepared for the workforce.\n\n## Prediction 3: AI/ML will be used throughout the SDLC\n\n“AI will become essential for productivity,” Kramer says. “For example, DevOps teams will integrate AI/ML to automate repetitive and difficult tasks. Ideally, this would ease the burden on developers by removing their cognitive load, decreasing the amount of context-switching they have to do, and enabling them to stay in the flow of development.\"\n\nAccording to our 2022 Global DevSecOps survey, 62% of respondents practice ModelOps, while 51% use AI/ML to check code.\n\n“Combining digital transformation with business analytics and AI - real transformations are possible,” says [Christina Hupy](https://gitlab.com/c_hupy), Sr. Manager, Community Programs. “As more of their data is input, businesses can draw actual insights and use AI to continuously improve their systems.”\n\nDeSanto agrees and predicts that [AI-assisted workflows will gain popularity](/blog/why-ai-in-devops-is-here-to-stay/) in application development. 
“AI/ML will further enable rapid development, security remediation, improved test automation, and better observability,” he says.\n\n[Taylor McCaslin](https://gitlab.com/tmccaslin), Group Manager of Product for Data Science, says that while AI/ML certainly isn’t new, making technologies such as open-ended AI accessible to consumers, set an expectation to figure out how it could be better used in software development (think code completion and other such tasks).\n\nHe predicts that while AI/ML will be used all along the SDLC, organizations will grapple with privacy concerns, preserving intellectual property (such as AI-generated code ownership) and permissiveness of licenses for training data sets and algorithms.\n\nAt the same time, he says to look for “more rapid development in the MLOps and DataOps spaces to help developers manage, maintain, and iterate on production software systems that leverage ML and AI.” (Note: GitLab is investing in our ModelOps stage to help support the development of data science-enriched software within the GitLab platform.)\n\n## Prediction 4: Value stream analytics will take on a greater role in organizations\n\nThe digital transformation that organizations will undergo this year will require a deeper commitment to [examining value streams](/blog/the-gitlab-quarterly-how-our-latest-beta-releases-support-developers/). “Value stream analytics will extend past development workflows to provide a more holistic view of the value organizations deliver to their users (both internal and external),” DeSanto says.\n\nExecutive leadership will seek out metrics that give insight into how digital transformation and technological investments are delivering value and driving business results. This is a shift from solely focusing on development efficiencies. 
The 2022 Global DevSecOps survey found that 75% of respondents are either using a DevOps platform or plan to move to one within a year with one of the drivers of this change being metrics and observability.\n\n## Prediction 5: Observability will shift left for efficient DevSecOps\n\n[Observability](/direction/monitor/platform-insights/) will also move further left in the SDLC, according to [Michael Friedrich](https://gitlab.com/dnsmichi), Senior Developer Evangelist. “Observability-driven development will enable everyone to become more efficient and inspire innovation,\" he says.\n\nNew observability-enabling technologies like [eBPF](https://ebpf.io/what-is-ebpf) will help developers with automated code instrumentation instead of adding more workload with manual code instrumentation. eBPF also supports better observability and security workflows in cloud-native environments.\n\nObservability will play a critical role in improving the efficiency of DevSecOps workflows, including CI/CD, infrastructure cost analysis, and trending/forecasting for better capacity planning.\n\n_What do you think will be the big DevSecOps technology advancements this year? Let us know your predictions in the comments below._\n\n## Engage with DevSecOps experts\n\nWant to dig deeper into how to innovate while still keeping an eye on cost efficiencies? Sign up for our webcast [“GitLab’s DevSecOps Innovations and Predictions for 2023”](https://page.gitlab.com/webcast-gitlab-devsecops-innovations-predictions-2023.html?utm_medium=blog&utm_source=gitlab&utm_campaign=devopsgtm&utm_content=fy23q4release) on Jan. 
31 to get expert advice and insights about this era of DevSecOps transformation and the tools and strategies you’ll need to meet this challenge.\n[Register](https://page.gitlab.com/webcast-gitlab-devsecops-innovations-predictions-2023.html?utm_medium=blog&utm_source=gitlab&utm_campaign=devopsgtm&utm_content=fy23q4release) today!\n\nCover image by [Drew Beamer](https://unsplash.com/@dbeamer_jpg?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com/)\n{: .note}\n",[917,691,9,717],{"slug":3300,"featured":6,"template":693},"whats-next-for-devsecops","content:en-us:blog:whats-next-for-devsecops.yml","Whats Next For Devsecops","en-us/blog/whats-next-for-devsecops.yml","en-us/blog/whats-next-for-devsecops",{"_path":3306,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3307,"content":3311,"config":3316,"_id":3318,"_type":14,"title":3319,"_source":16,"_file":3320,"_stem":3321,"_extension":19},"/en-us/blog/why-enterprise-independence-matters-more-than-ever-in-devsecops",{"config":3308,"title":3309,"description":3310},{"noIndex":6},"Why enterprise independence matters more than ever in DevSecOps","As leaders question who controls their development infrastructure, GitLab's independence and transparency advantages have never been more relevant.\n",{"title":3309,"description":3310,"authors":3312,"date":1589,"body":3313,"category":10,"tags":3314,"heroImage":3315},[2340],"For over a decade, GitLab has been committed to transparency, independence, and putting developers first. Today, as the industry evolves, this matters more than ever. Enterprise leaders are asking critical questions: Who ultimately controls your development infrastructure? How is your code being used in AI systems? 
What happens when vendor priorities shift away from your critical requirements?\n\nLast month, [we announced GitLab 18.3](https://about.gitlab.com/blog/gitlab-18-3-expanding-ai-orchestration-in-software-engineering/), the latest release of our AI-native DevSecOps platform. Agent Insights, part of GitLab Duo Agent Platform, provides visibility into agent decision-making processes. Expanded AI model support means no vendor lock-in. Enhanced governance controls help enable compliance across multiple jurisdictions.\n\nThese aren't just features. They're demonstrations of the transparency, independence, and developer-first approach that defines GitLab. Here's how this strategy translates into practice.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1115249475?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Who is GitLab_Robin_090225_FINAL\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## AI transparency across the DevSecOps lifecycle\n\n**At GitLab, our decade-long commitment to transparency directly addresses these concerns.** As artificial intelligence becomes increasingly integrated into development workflows, organizations are rightfully concerned about how their code and data are being used for AI training.\n\nThe GitLab [AI Transparency Center](https://about.gitlab.com/ai-transparency-center/), launched in April 2024, provides clear documentation of our data governance practices, privacy protections, and ethical AI principles. 
Unlike platforms that may operate AI features with unclear data usage policies, GitLab prioritizes transparency so that customers  can understand [exactly how their data is processed](https://docs.gitlab.com/user/gitlab_duo/data_usage/), stored, and protected — with no training on that data.\n\nOur approach extends to model flexibility and vendor independence. While some platforms lock customers into single, large language model (LLM) providers, creating additional vendor dependencies and potential single points of failure, GitLab's AI features are powered by a variety of models. This approach enables us to support a wide range of use cases, providing customers with the flexibility to align with their strategic priorities.\n\nAs we further develop GitLab Duo Agent Platform, we remain focused on data control and maintaining comprehensive human-in-the-loop controls. And GitLab Duo Self-Hosted provides complete data sovereignty with air-gapped deployment options, zero-day data retention policies, and the ability to process all AI requests within your own infrastructure.\n\nSince May 2024, we've also maintained an [AI continuity plan](https://handbook.gitlab.com/handbook/product/ai/continuity-plan/) with an industry-leading commitment: the ability to evaluate and move to a new model within 30 days if a provider changes its practices regarding customer data. This proactive approach to AI vendor risk management reflects our dedication to customer control.\n\n## Choice in deployment, choice in cloud provider\n\n**You should be able to choose how and where to deploy your DevSecOps environment.** GitLab provides genuine deployment flexibility. Organizations can choose from on-premises installations, multi-tenant SaaS, or GitLab Dedicated, our fully managed single-tenant SaaS solution, without sacrificing functionality or facing artificial restrictions designed to drive ecosystem lock-in. 
GitLab is also cloud-neutral, allowing customers to use the cloud provider that best suits their business needs and environment.\n\nThis flexibility proves invaluable when navigating complex jurisdictional requirements and regulatory challenges. When new data localization laws emerge — as we've seen across the European Union and other regions — organizations using GitLab can rapidly adapt their deployment strategies without being constrained by ecosystem dependencies.\n\nFrom a procurement and risk management perspective, platform independence also provides crucial leverage in contract negotiations. Organizations aren't forced into restrictive licensing agreements that prioritize vendor interests over customer needs. This independence becomes particularly critical as enterprises become more vigilant about who controls their AI stack.\n\n## Security and compliance: Built-in and always a priority\n\n**Security and compliance are now equally important to development features and should be built into the platform, not retrofitted as an afterthought.** GitLab's single platform approach provides significant advantages over fragmented platforms that rely on third-party add-ins to match basic security and governance features. This architectural difference has significant implications for possible legal risk, operational efficiency, and regulatory compliance. Each additional tool in the chain represents another potential point of failure, another set of terms and conditions to negotiate, and another source of risk.\n\nGitLab provides comprehensive built-in security and compliance capabilities, including custom compliance frameworks, dynamic application security testing (DAST), API fuzz testing, coverage-guided fuzzing, and infrastructure-as-code testing. 
These capabilities are natively integrated into the platform, offering consistent policy enforcement and reducing the compliance complexity and additional costs that come with managing multiple third-party tools.\n\nOur [compliance center](https://docs.gitlab.com/user/compliance/compliance_center/) provides a central location for teams to manage their compliance standards, adherence reporting, violations reporting, and compliance frameworks for their group. This unified approach to compliance management is particularly valuable for organizations operating in highly-regulated industries where audit trails and compliance documentation are critical.\n\n## Staying close to our open source community\n\n**The best tools are shaped by the people who use them.** Our commitment to open source and engagement with our community has been core to GitLab since our founding. For instance, our [Co-Create program](https://about.gitlab.com/community/co-create/) is a collaborative initiative that enables customers to work directly with GitLab engineers to contribute features, fixes, and enhancements to the GitLab platform.\n\nOur transparency value remains fundamental to our business. An example of this is our [open issue tracker](https://gitlab.com/groups/gitlab-org/-/issues), where customers can follow our progress and engage directly with the GitLab team in discussions about ways we can improve our product. 
We recently launched our [Healthy Backlog Initiative](https://about.gitlab.com/blog/inside-gitlabs-healthy-backlog-initiative/) to give customers even greater visibility into our planning and direct their feedback to places with the greatest impact.\n\nOur approach enables organizations to contribute to and benefit from open source innovation while maintaining the governance, audit trails, and security controls required for regulated environments.\n\n## Data governance: Your data, your control\n\n**You maintain complete control over your data and how it's processed.** Data governance has become an increasingly critical factor in enterprise technology decisions, driven by a complex web of national and regional data protection laws and growing concern about control over sensitive intellectual property — like source code, customer insights, strategic initiatives, and competitive intelligence.\n\nWith GitLab, you can manage who has access to AI-powered capabilities within the platform, extending beyond simple access controls to encompass encryption standards and audit capabilities aligned to regulatory frameworks. Also, customers' code and data are never used to train AI models.\n\n## The choice is clear\nGitLab continues to lead in AI-native DevSecOps platform innovation – our recent [18.3 release](https://about.gitlab.com/blog/gitlab-18-3-expanding-ai-orchestration-in-software-engineering/) demonstrates this – while staying true to the independence and transparency commitments that have always guided us.\n\nCustomers have a choice and it's clear: retaining control vs. vendor lock-in; transparency vs. uncertainty; dedication to innovation vs. 
whims of the larger ecosystem.\n\nGitLab provides the foundation for sustainable digital transformation that balances innovation with independence, helping you achieve business value for your customers.\n\n[Try GitLab Ultimate with GitLab Duo for free today.](https://about.gitlab.com/free-trial/)",[9,716,738,717,896],"https://res.cloudinary.com/about-gitlab-com/image/upload/v1756500636/wmey6kqzzuhirk88w2de.png",{"featured":91,"template":693,"slug":3317},"why-enterprise-independence-matters-more-than-ever-in-devsecops","content:en-us:blog:why-enterprise-independence-matters-more-than-ever-in-devsecops.yml","Why Enterprise Independence Matters More Than Ever In Devsecops","en-us/blog/why-enterprise-independence-matters-more-than-ever-in-devsecops.yml","en-us/blog/why-enterprise-independence-matters-more-than-ever-in-devsecops",{"_path":3323,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3324,"content":3329,"config":3334,"_id":3336,"_type":14,"title":3337,"_source":16,"_file":3338,"_stem":3339,"_extension":19},"/en-us/blog/why-hackerone-gets-love-letters-from-developers",{"title":3325,"description":3326,"ogTitle":3325,"ogDescription":3326,"noIndex":6,"ogImage":2394,"ogUrl":3327,"ogSiteName":706,"ogType":707,"canonicalUrls":3327,"schema":3328},"Why HackerOne gets love letters from its developers","Learn how HackerOne is making developers more productive while ensuring that security is built into their software development processes.","https://about.gitlab.com/blog/why-hackerone-gets-love-letters-from-developers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why HackerOne gets love letters from its developers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2023-11-14\"\n      }",{"title":3325,"description":3326,"authors":3330,"heroImage":2394,"date":3331,"body":3332,"category":2866,"tags":3333},[1379],"2023-11-14","At 
[HackerOne](/customers/hackerone/), a cybersecurity company, using the GitLab DevSecOps Platform has changed developers’ team experience and culture so much they’re basically sending the company love letters about their jobs.\n\nThis is no exaggeration, according to HackerOne Senior Director of Platform and Infrastructure Russell Owen, who spoke to an audience at GitLab’s DevSecOps World Tour in Mountain View, California, this fall. The 11-year-old company adopted GitLab in 2018 for source code and issues management, CI/CD, and security and compliance – features that didn’t exist in its previous tooling system. Since then, developers have become more productive — and happier.\n\nHackerOne isn’t alone here. According to GitLab’s [2023 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/), 28% of the more than 5,000 respondents said using DevOps made their developers happier. Productivity, efficiency, and automation go a long way to making developers’ jobs easier and more enjoyable.\n\n## Measuring for DevSecOps impact\n\n“We run surveys every quarter. Are the developers happy? What do they find frustrating? Where's the friction?” Owen said in an on-stage interview with [Sherrod Patching](https://about.gitlab.com/company/team/#spatching), vice president of customer success management at GitLab. “We compare our metrics across the industry to see how we're doing.”\n\nAnd they’re doing well. “For instance, just making our [CI/CD pipelines](/blog/how-to-learn-ci-cd-fast/) as tight as possible makes people more effective. We've done a lot of work on optimizing that inside of GitLab,” Owen added. “And we get what count as love letters from our developers. People are so excited because the pipeline times have come down substantially, and it has a direct impact on the productivity of the team.”\n\nWhile Owen said he definitely wants his DevSecOps team members to be happy, he also wants productivity to be high. 
And he noted the importance of evaluating a variety of metrics — from happiness to the number of merge requests and releases — to enable teams to quantify their impact on the company. And those numbers are good, too.\n\nWith GitLab, merge requests per engineer are up by 50% over the last year, and they’ve cut their code release time by 50% over the last two years, according to Owen, who added that the number of quarterly releases to production jumped by 73% since two years ago.\n\n“A lot of that is from just shaving time off the CI/CD pipeline,” he said. “That’s efficiency. That’s productivity. It’s important because I need to be able to justify our work and expenses. These kinds of metrics make it very easy for me to say: ‘Investing in this area has a measurable return on investment.’”\n\n## Using GitLab for security and compliance\n\nWhen a business is known around the world for cybersecurity, assuring the safety and reliability of its own code has to be a priority. HackerOne uses GitLab to ensure that security is built into their software products and processes, according to Owen.\n\nHe explained their need, for example, to keep the number of people who have access to their production systems to a minimum. The fewer people who can touch the system, the fewer potential entry points to introduce vulnerabilities. The platform enables the team to build in guidelines mandating that any change needs to be reviewed and approved by the infrastructure team. So when teams make a change to the infrastructure, it’s only applied to the system once it’s been accepted.\n\n“Changes are being reviewed by people who have expertise in the area,” said Owen. “This really allows efficiency, but also maintains security. And it's all done inside GitLab.”\n\nHackerOne also relies on the GitLab platform to make sure they are staying in line with strict [industry compliance requirements](/blog/meet-regulatory-standards-with-gitlab/). 
Limiting the number of people who have access to the system is an example of that.\n\nOwen said they also use GitLab to [stay compliant](/blog/top-5-compliance-features-to-leverage-in-gitlab/) with FedRAMP, the Federal Risk and Authorization Management Program, a set of standards the U.S. federal government requires for moving sensitive information into the cloud.\n\n“We do more and more work with containers and FedRAMP requires that they be scanned before being moved into production,” he added. “You can’t just bring them into your infrastructure directly. We stage and scan everything in GitLab — all the containers, particularly third-party containers. Then, if they meet our criteria, we move them on so they’re eligible for production.”\n\nWith GitLab, HackerOne remains FedRAMP compliant for all of its federal customers.\n\n## Looking to an AI future\n\nWhen asked what he thought they’d be focused on over the next five years, Owen replied, “AI, AI, AI.”\n\nFor HackerOne, which brings in immense amounts of data, Owen said he’s looking to artificial intelligence to help them cull that flood of information to find patterns and pull out what will be useful to their customers.\n\n“Whatever business you're in, for a long time, there has been a lot of data,” Owen said. “And you can't help but wonder how you can use it to provide value to your customers, right? There has been a sea change in the last few years, from AI being something that was kind of theoretically interesting to something that is operationally useful. So if you're not doing AI... Well, I'm sure we’re all doing AI. You have to do it now. And the tools are just incredible.”\n\n_HackerOne’s Owen is a software developer, designer, and IT leader with more than 20 years of experience building advanced systems. 
He previously worked at Google as an engineering manager in the company’s privacy and security department, as well as at Research in Motion, where he was responsible for the Blackberry’s infrastructure design._\n\n_Read more GitLab customer stories on our [customers page](https://about.gitlab.com/customers/)._",[716,496,9,2868],{"slug":3335,"featured":91,"template":693},"why-hackerone-gets-love-letters-from-developers","content:en-us:blog:why-hackerone-gets-love-letters-from-developers.yml","Why Hackerone Gets Love Letters From Developers","en-us/blog/why-hackerone-gets-love-letters-from-developers.yml","en-us/blog/why-hackerone-gets-love-letters-from-developers",{"_path":3341,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3342,"content":3347,"config":3352,"_id":3354,"_type":14,"title":3355,"_source":16,"_file":3356,"_stem":3357,"_extension":19},"/en-us/blog/write-terraform-plans-faster-with-gitlab-duo-code-suggestions",{"title":3343,"description":3344,"ogTitle":3343,"ogDescription":3344,"noIndex":6,"ogImage":1623,"ogUrl":3345,"ogSiteName":706,"ogType":707,"canonicalUrls":3345,"schema":3346},"Write Terraform plans faster with GitLab Duo Code Suggestions","Follow this tutorial to learn how to use AI-powered code creation to manage your infrastructure with Terraform.","https://about.gitlab.com/blog/write-terraform-plans-faster-with-gitlab-duo-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Write Terraform plans faster with GitLab Duo Code Suggestions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2024-01-24\",\n      }",{"title":3343,"description":3344,"authors":3348,"heroImage":1623,"date":3349,"body":3350,"category":10,"tags":3351},[1722],"2024-01-24","[Terraform](https://www.terraform.io/) is an industry-standard for infrastructure orchestration. 
It can, however, be daunting and time-consuming to learn how to create infrastructure resources, especially when using Terraform providers you are unfamiliar with. That is where GitLab Duo Code Suggestions and AI-powered code creation comes in.\n\n[GitLab Duo Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) accelerates your coding in up to [15 supported programming languages](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html#supported-languages-in-ides), including infrastructure as code (IaC) using Terraform.\n\nBy combining IaC with Terraform, infrastructure teams can adopt new Terraform providers/modules quickly and move changes to production faster. The combination also reduces the onboarding time for new users of Terraform or developers who are new to a code base by maintaining the context of the code base and dependencies in its suggestions. Instead of spending hours reading through documentation, you only need to review the given suggestions and update as necessary.\n\nIn this post, you will learn how to set up GitLab Duo Code Suggestions for Terraform.\n\n## How to set up GitLab Duo Code Suggestions for Terraform\n\nFollow these 6 steps to get started:\n\n1. Install the GitLab extension for your IDE of choice (read about [supported IDE extensions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html#supported-editor-extensions).\n2. Authenticate the GitLab extension with GitLab.com or your GitLab self-hosted instance.\n3. Enable Code Suggestions in your Profile settings under the Preferences > Preferences menu. Search for Code Suggestions, toggle the checkbox, and click Save.\n4. Confirm that Code Suggestions is also enabled in your GitLab extensions settings in the IDE.\n5. If you are using Visual Studio Code and Neovim, third-party extension support is needed to use Terraform with Code Suggestions. 
For VS Code, you can install the [official Terraform extension](https://marketplace.visualstudio.com/items?itemName=HashiCorp.terraform) from HashiCorp.\n6. To test and apply your Terraform plans, [install Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform) in your terminal and any necessary provider SDKs.\n\n## Creating your plans\n\nOnce you have all the prerequisites in place, all you need to do next is create a Terraform file ending with `.tf` and prompt GitLab Duo with comments describing what you want. Suggestions will be displayed while maintaining context. Even when no prompt is provided, GitLab Duo will suggest other Terraform resources you might be interested in based on the current context in your plans.\n\nLet's see this in action with GitLab Duo helping with a Terraform plan for provisioning a load balancer with four instances and other necessary resources on the Google Cloud Platform.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/saa2JJ57UaQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Get started with Code Suggestions and Terraform today\n\n[Try Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) today and accelerate your software development. Code Suggestions is also available for [self-managed GitLab](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/self_managed.html) (needs to be enabled by an administrator). 
Code Suggestions uses best-in-class large language models, and non-public customer data is never used as training data.",[496,9,737],{"slug":3353,"featured":91,"template":693},"write-terraform-plans-faster-with-gitlab-duo-code-suggestions","content:en-us:blog:write-terraform-plans-faster-with-gitlab-duo-code-suggestions.yml","Write Terraform Plans Faster With Gitlab Duo Code Suggestions","en-us/blog/write-terraform-plans-faster-with-gitlab-duo-code-suggestions.yml","en-us/blog/write-terraform-plans-faster-with-gitlab-duo-code-suggestions",{"_path":3359,"_dir":3360,"_draft":6,"_partial":6,"_locale":7,"content":3361,"config":3367,"_id":3369,"_type":14,"title":3370,"_source":16,"_file":3371,"_stem":3372,"_extension":19},"/en-us/blog/external-url/ai-trends-for-2025-agentic-ai-self-hosted-models-and-more","external-url",{"title":3362,"description":3363,"heroImage":3364,"date":3365,"category":10,"tags":3366},"AI trends for 2025: Agentic AI, self-hosted models, and more","Discover coming trends in AI for software development, from on-premises model deployments to proactive AI assistants.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665039/Blog/Hero%20Images/display-the-source-article-ai-trends-coming-in-2026-image-0492-1800x945-fy25.png","2025-01-13",[9],{"featured":6,"externalUrl":3368},"https://about.gitlab.com/the-source/ai/ai-trends-for-2025-agentic-ai-self-hosted-models-and-more/","content:en-us:blog:external-url:ai-trends-for-2025-agentic-ai-self-hosted-models-and-more.yml","Ai Trends For 2025 Agentic Ai Self Hosted Models And 
More","en-us/blog/external-url/ai-trends-for-2025-agentic-ai-self-hosted-models-and-more.yml","en-us/blog/external-url/ai-trends-for-2025-agentic-ai-self-hosted-models-and-more",{"_path":3374,"_dir":246,"_draft":6,"_partial":6,"_locale":7,"seo":3375,"content":3381,"config":3386,"_id":3388,"_type":14,"title":3389,"_source":16,"_file":3390,"_stem":3391,"_extension":19},"/en-us/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat",{"title":3376,"description":3377,"ogTitle":3376,"ogDescription":3377,"noIndex":6,"ogImage":3378,"ogUrl":3379,"ogSiteName":706,"ogType":707,"canonicalUrls":3379,"schema":3380},"10 best practices for using AI-powered GitLab Duo Chat","Explore tips and tricks for integrating GitLab Duo Chat into your AI-powered DevSecOps workflows. Plus, expert advice on how to refine chat prompts for the best results.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097639/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%281%29_77JeTV9gAmbXM0224acirV_1750097638765.png","https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"10 best practices for using AI-powered GitLab Duo Chat\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2024-04-02\",\n      }",{"title":3376,"description":3377,"authors":3382,"heroImage":3378,"date":3383,"body":3384,"category":10,"tags":3385},[1262],"2024-04-02","Getting into a conversation with AI can be challenging. What question do you\nstart with? How do you frame the question? How much context is needed? 
Will\nthe conversation provide the best and most efficient results?\n\n\nIn this tutorial, we explore 10 tips and best practices to integrate GitLab\nDuo Chat into your AI-powered DevSecOps workflows and refine your prompts\nfor the best results.\n\n\n[Get started: Keep GitLab Duo Chat open and in\nsight](#get-started-keep-gitlab-duo-chat-open-and-in-sight)\n\n\n[10 best practices for using GitLab Duo\nChat](#10-best-practices-for-using-gitlab-duo-chat)\n\n\n1. [Have a conversation](#1.-have-a-conversation)\n\n2. [Refine the prompt for more\nefficiency](#2.-refine-the-prompt-for-more-efficiency)\n\n3. [Follow prompt patterns](#3.-follow-prompt-patterns)\n\n4. [Use low-context communication](#4.-use-low-context-communication)\n\n5. [Repeat yourself](#5.-repeat-yourself)\n\n6. [Be patient](#6.-be-patient)\n\n7. [Reset and start anew](#7.-reset-and-start-anew)\n\n8. [Gain efficiency with slash commands in the\nIDE](#8.-gain-efficiency-with-slash-commands-in-the-ide)\n\n9. [Refine the prompt for slash\ncommands](#9.-refine-the-prompt-for-slash-commands)\n\n10. [Get creative with slash\ncommands](#10.-get-creative-with-slash-commands)\n\n\nBonus content:\n\n- [Shortcuts](#shortcuts)\n\n- [Fun exercises](#fun-exercises)\n\n- [Learn more](#learn-more)\n\n\n> Live demo! Discover the future of AI-driven software development with our\nGitLab 17 virtual launch event. [Register\ntoday!](https://about.gitlab.com/seventeen/)\n\n\n## Get started: Keep GitLab Duo Chat open and in sight\n\n\n[GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) is\navailable in the GitLab UI, Web IDE, and supported programming IDEs, for\nexample, VS Code. \n\n\nIn VS Code, you can open GitLab Duo Chat in the default left pane. You can\nalso drag and drop the icon into the right pane. This allows you to keep\nChat open while you write code and navigate the file tree, perform Git\nactions, etc. 
To reset the Chat location, open the command palette (by\npressing the `Command+Shift+P` (on macOS) or `Ctrl+Shift+P` (on\nWindows/Linux) keyboard shortcut and then type `View: Reset View Locations`.\nThe following short video shows you how to do it.\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/foZpUvWPRJQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\nThe Web IDE and VS Code share the same framework – the same method works in\nthe Web IDE for more efficient workflows.\n\n\n![Chat in Web\nIDE](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097645344.png)\n\n\n## 10 best practices for using GitLab Duo Chat\n\n\n### 1. Have a conversation\n\n\nChats are conversations, not search forms.\n\n\nFor the first conversation icebreaker, you can start with the same search\nterms similar to a browser search and experiment with the response and\noutput. In this example, let's start with a C# project and best practices. \n\n\n> c# start project best practices\n\n\n![Chat prompt for C# start project best practices and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097646/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750097645345.png)\n\n\nThe response is helpful to understand a broad scope of C#, but does not\nkickstart immediate best practices. Let's follow up with a more focused\nquestion in the same context. \n\n\n> Please show the project structure for the C# project.\n\n\n![Chat prompt for project structure for the C# project and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750097645346.png)\n\n\nThis answer is helpful. 
Next, let's follow up with a Git question, and use\nthe same question structure: Direct request to show something.\n\n\n> Show an example for a .gitignore for C#\n\n\n![Chat prompt for a .gitignore for C# and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image25_aHR0cHM6_1750097645347.png)\n\n\nContinue with CI/CD and ask how to build the C# project.\n\n\n> Show a GitLab CI/CD configuration for building the C# project\n\n\n![Chat prompt for GitLab CI/CD configuration for building C# project and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image16_aHR0cHM6_1750097645349.png)\n\n\nIn this example, Chat encouraged us to request specific changes. Let's ask\nto use the .NET SDK 8.0 instead of 6.0. \n\n\n> In the above example, please use the .NET SDK 8.0 image\n\n\n![Chat prompt to use .NET SDK 8.0 image and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image32_aHR0cHM6_1750097645350.png)\n\n\nThe CI/CD configuration uses the .NET command line interface (CLI). Maybe we\ncan use that for more efficient commands to create the projects and tests\nstructure, too? \n\n\n> Explain how to create projects and test structure on the CLI \n\n\n![Chat prompt to explain how to create projects and test structure on the\nCLI and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image14_aHR0cHM6_1750097645351.png)\n\n\nOf course, we could execute these commands in the terminal, but what if we\nwanted to stay in VS Code? 
Let's ask Chat.\n\n\n> Explain how to open a new terminal in VS Code\n\n\n![Chat prompt to explain how to open a new terminal in VS Code and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097645351.png)\n\n\n### 2. Refine the prompt for more efficiency\n\n\nThink of GitLab Duo Chat as a human, and engage with full sentences that\nprovide as much context into your thoughts and questions. \n\n\nExperienced browser search users might know this approach to queries: Build\nup the question, add more terms to refine the scope, and restart the search\nafter opening plenty of tabs. \n\n\nIn a browser search, this probably would result in four to five different\nsearch windows. \n\n\n```markdown\n\nc# start project best practices\n\nc# .gitignore\n\nc# gitlab cicd \n\nc# gitlab security scanning \n\nc# solutions and projects, application and tests\n\n``` \n\n\nYou can follow this strategy in a chat conversation, too. It requires adding\nmore context, making it a conversational approach. GitLab Duo Chat enables\nyou to ask multiple questions in one conversation request. Example: You need\nto start with a new C# project, apply best practices, add a `.gitignore`\nfile, and configure CI/CD and security scanning, just like in the above\nsearch. In Chat, you can combine the questions into one request.\n\n\n> How can I get started creating an empty C# console application in VS Code?\nPlease show a .gitignore and .gitlab-ci.yml configuration with steps for C#,\nand add security scanning for GitLab. 
Explain how solutions and projects in\nC# work, and how to add a test project on the CLI.\n\n\n![Chat prompt adding more context and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image37_aHR0cHM6_1750097645352.png)\n\n\nIn this response, Chat suggests to ask for specific configuration examples\nin follow-up questions in the conversation. Async practice: Create follow-up\nquestions. You can omit `C#` as context in the same chat session.\n\n\n> Please show an example for a .gitignore. Please show a CI/CD\nconfiguration. Include the SAST template.\n\n\n### 3. Follow prompt patterns \n\n\nFollow the pattern: `Problem statement, ask for help, provide additional\nrequests`. Not everything comes to mind when asking the first question –\ndon't feel blocked, and instead start with `Problem statement, ask for help`\nin the first iteration. \n\n\n> I need to fulfill compliance requirements. How can I get started with\nCodeowners and approval rules?\n\n\n![Chat prompt to get started with Codeowners and approval rules and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image19_aHR0cHM6_1750097645352.png)\n\n\nThe answer is helpful but obviously generic. Now, you may want to get\nspecific help for your team setup. \n\n\n> Please show an example for Codeowners with different teams: backend,\nfrontend, release managers.\n\n\n![Chat prompt to show an example for Codeowners with different teams:\nbackend, frontend, release managers and reponse\n](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image31_aHR0cHM6_1750097645353.png)\n\n\nAn alternative is to describe the situation you are in and to ask for input.\nIt can feel a bit like a conversation to follow the STAR model (Situation,\nTask, Action, Results). \n\n\n> I have a Kubernetes cluster integrated in GitLab. 
Please generate a Yaml\nconfiguration for a Kubernetes service deployment. Explain how GitOps works\nas a second step. How to verify the results?\n\n\n![Chat prompt with multiple questions and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image27_aHR0cHM6_1750097645354.png)\n\n\n### 4. Use low-context communication \n\n\nProvide as much context as needed to provide an answer. Sometimes, the\nprevious history or opened source code does not provide that helpful\ncontext. To make questions more efficient, apply a pattern of [low-context\ncommunication](https://handbook.gitlab.com/handbook/company/culture/all-remote/effective-communication/#understanding-low-context-communication),\nwhich is used in all-remote communication at GitLab.\n\n\nThe following question did not provide enough context in a C++ project.\n\n\n> Should I use virtual override instead of just override?\n\n\n![Chat prompt asking if the users should use virtual override instead of\njust override and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image34_aHR0cHM6_1750097645354.png)\n\n\nInstead, try to add more context:\n\n\n> When implementing a pure virtual function in an inherited class, should I\nuse virtual function override, or just function override? Context is C++. \n\n\n![Chat prompt with more detail and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image36_aHR0cHM6_1750097645355.png)\n\n\nThe example is also shown in the [GitLab Duo Coffee Chat: Refactor C++\nfunctions into OOP classes for abstract database\nhandling](https://youtu.be/Z9EJh0J9358?t=2190). \n\n\n### 5. Repeat yourself\n\n\nAI is not predictable. Sometimes, it may not answer with the expected\nresults, or does not produce source code examples or configuration snippets\nbecause it lacked context. 
It is recommended to repeat the question and\nrefine the requirements.\n\n\nIn the following example, we want to create a C# application. In the first\nattempt, we did not specify the application type – C# can be used to create\nconsole/terminal but also UI applications. The result also does not provide\nan empty example source code. The second, repeated prompt adds two more\nwords - `console` and `empty`. \n\n\n> How can I get started creating an C# application in VSCode?\n\n> \n\n> How can I get started creating an empty C# console application in VSCode?\n\n\nThe results in the prompt differ. The first response is helpful to get\nstarted by following the instructions in the VS Code window, but it does not\ntell us where the source code is located and how to modify it. The repeated\nprompt with refinements modifies the response and provides instructions how\nto override the default template with some “hello world” code.\n\n\n![Chat prompt with repeated prompt with modifications and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image28_aHR0cHM6_1750097645355.png)\n\n\nYou can also combine repeat and refine strategies, and ask Chat to show an\nexample for application code and tests.\n\n\n> How can I get started creating an empty C# console application in VSCode?\nPlease show an example for application and tests.\n\n\n![Chat prompt that asks for example for application and tests and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097645356.png)\n\n\n#### Repeat yourself after generic questions \n\n\nWhen asking generic technology questions, GitLab Duo Chat might not be able\nto help. In the following scenario, I wanted to get a suggestion for Java\nbuild tools and framework, and it did not work. 
There could be many answers:\nMaven, Gradle, etc., as build tools, and [100+ Java\nframeworks](https://en.wikipedia.org/wiki/List_of_Java_frameworks),\ndepending on the technology stack and requirements.\n\n\n![Chat prompt for Java build tools and framework and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097645356.png)\n\n\nLet's assume that we want to focus on a customer environment with [Java\nSpring Boot](https://spring.io/projects/spring-boot). \n\n\n> I want to create a Java Spring Boot application. Please explain the\nproject structure and show a hello world example.\n\n\n![Chat prompt that asks for more, including a hello world example and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image26_aHR0cHM6_1750097645357.png)\n\n\nThis provides great results already. As an async exercise, repeat the\nprompt, and ask how to deploy the application, adding more refinements in\neach step. Alternatively, you can make it a follow-up conversation.\n\n\n> I want to create a Java Spring Boot application. Please explain the\nproject structure and show a hello world example. Show how to build and\ndeploy the application in CI/CD.\n\n> \n\n> I want to create a Java Spring Boot application. Please explain the\nproject structure and show a hello world example. Show how to build and\ndeploy the application in CI/CD, using container images.\n\n> \n\n> I want to create a Java Spring Boot application. Please explain the\nproject structure and show a hello world example. Show how to build and\ndeploy the application in CI/CD, using container images. Use Kubernetes and\nGitOps in GitLab.\n\n\n### 6. 
Be patient\n\n\nSingle words or short sentences might not generate the desired results, [as\nshown in this video example](https://youtu.be/JketELxLNEw?t=1220).\nSometimes, GitLab Duo Chat is able to guess from available data, but\nsometimes also might insist on providing more context.\n\n\nExample: `labels` matches the GitLab documentation content.\n\n\n![Chat prompt about labels and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image12_aHR0cHM6_1750097645357.png)\n\n\nRefine the question to problem statements and more refinements for issue\nboard usage.\n\n\n> Explain labels in GitLab. Provide an example for efficient usage with\nissue boards.\n\n\n![Chat prompt that includes asking for an example and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image21_aHR0cHM6_1750097645358.png)\n\n\nOr use a problem statement, followed by a question and the ask for\nadditional examples.\n\n\n> I don't know how to use labels in GitLab. Please provide examples, and how\nto use them for filters in different views. Explain these views with\nexamples.\n\n\n![Chat prompt with problem statement and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750097645358.png)\n\n\nAlso, avoid `yes/no` questions and instead add specific context.\n\n\n> Can you help me fix performance regressions?\n\n\n![Chat promptt that asks for help with fixing performance regressions and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image18_aHR0cHM6_1750097645359.png)\n\n\nInstead, provide the context of the performance regression, including the\nprogramming languages, frameworks, technology stack, and environments. 
The\nfollowing example uses an environment from some years ago, which can still\nbe accurate today.\n\n\n> My PHP application encounters performance regressions using PHP 5.6 and\nMySQL 5.5. Please explain potential root causes, and how to address them.\nThe app is deployed on Linux VMs.\n\n\n![Chat prompt that includes more detail and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image24_aHR0cHM6_1750097645360.png)\n\n\n### 7. Reset and start anew\n\n\nSometimes, the chat history shows a different learning curve and provides\nthe wrong context for follow-up questions. Or, you asked specific questions\nwhere GitLab Duo Chat cannot provide answers. Since generative AI is not\npredictable, it might also lack the ability to provide certain examples, but\nthink it gave them in a future response (observed in Chat Beta). The\nunderlying large language models, or LLMs, sometimes might insist on giving\na specific response, in an endless loop.\n\n\n> How can I get started creating an empty C# console application in VSCode?\nPlease show a .gitignore and .gitlab-ci.yml configuration with steps for C#,\nand add security scanning for GitLab. Explain how solutions and projects in\nC# work, and how to add a test project on the CLI.\n\n\nAfter asking the question above with an example configuration, I wanted to\nreduce the scope of the question to get a more tailored response. 
It did not\nwork as expected, since Chat knows about the chat history in context, and\nrefers to previous answers.\n\n\n> How can I get started creating an empty C# console application in VSCode?\nPlease show a .gitignore and .gitlab-ci.yml configuration with steps for C#.\n\n\n![Chat prompt that asks for configuration examples and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image23_aHR0cHM6_1750097645360.png)\n\n\nTo force Chat into a new context, use `/reset` as slash command to reset the\nsession, and repeat the question to get better results. You can also use\n`/clean` or `/clear` to delete all messages in the conversation.\n\n\n### 8. Gain efficiency with slash commands in the IDE \n\n\n#### Explain code\n\n\n- Q: Generated code? Existing code? Legacy code?\n\n- A: Use the [`/explain` slash command in the\nIDE](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#explain-code-in-the-ide).\n\n- A2: Refine the prompt with more focused responses, for example: `/explain\nfocus on potential shortcomings or bugs`. \n\n\n![Chat prompt with /explain slash\ncommand](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/gitlab_duo_chat_slash_commands_explain_01_aHR0cHM6_1750097645361.png)\n\n\n![Chat prompt with refined\nprompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097645361.png)\n\n\n#### Refactor code \n\n\n- Q: Unreadable code? Long spaghetti code? Zero test coverage?\n\n- A: Use the [`/refactor` slash command in the\nIDE](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#refactor-code-in-the-ide). \n\n- A2: Refine the prompt for more targeted actions, for example\nobject-oriented patterns: `/refactor into object-oriented classes with\nmethods and attributes`. 
\n\n\n![Chat prompt with /refactor slash\ncommand](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image35_aHR0cHM6_1750097645362.png)\n\n\n![Chat prompt with refined\nprompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image30_aHR0cHM6_1750097645362.png)\n\n\n#### Generate tests\n\n\n- Q: Testable code but writing tests takes too much time?\n\n- A: Use the [`/tests` slash command in the\nIDE](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#write-tests-in-the-ide).\n\n- A2: Refine the prompt for specific test frameworks, or test targets. You\ncan also instruct the prompt to focus on refactoring, and then generate\ntests: `/tests focus on refactoring the code into functions, and generate\ntests`.\n\n\n![Chat prompt with /tests slash\ncommand](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image29_aHR0cHM6_1750097645363.png)\n\n\n![Chat prompt with refined\nprompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097645363.png)\n\n\nMore practical examples in complete development workflows are available in\nthe [GitLab Duo\nexamples](https://docs.gitlab.com/ee/user/gitlab_duo_examples.html)\ndocumentation.\n\n\n### 9. Refine the prompt for slash commands \n\n\nYou will see refined prompts tips in this blog post a lot. It is one of the\ningredients for better AI-powered workflow efficiency. Slash commands are no\ndifferent, and allow for better results in GitLab Duo Chat.\n\n\nA customer recently asked: \"Can code explanations using `/explain` create\ncomments in code?\" The answer is: no. But you can use the Chat prompt to ask\nfollow-up questions, and ask for a summary in a code comment format. It\nrequires the context of the language. 
\n\n\nThe following example with a [C++ HTTP client code using the curl\nlibrary](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts/-/blob/5cc9bdd65ee8ee16c548bea0402c18f8209d4d06/chat/slash-commands/c++/cli.cpp)\nneeds more documentation. You can refine the `/explain` prompt by giving\nmore refined instructions to explain the code by adding code comments, and\nthen copy-paste that into the editor.\n\n\n> /explain add documentation, rewrite the code snippet\n\n\n![Chat prompt to add documentation and rewrite code snippet and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image13_aHR0cHM6_1750097645363.png)\n\n\nAlternatively, you can ask Chat to `/refactor` the source code, and generate\nmissing code comments through a refined prompt.\n\n\n> /refactor add code comments and documentation\n\n\n![Chat prompt to refactor source code and generate code\ncomments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image15_aHR0cHM6_1750097645364.png)\n\n\n### 10. Get creative with slash commands\n\n\nWhen the Chat prompt does not know an answer to a question about the source\ncode or programming language, look into the slash commands `/explain`,\n`/refactor`, and `/tests` and how much they can help in the context.\n\n\nIn the following example, an SQL query string in C++ is created in a single\nline. To increase readability, and also add more database columns in the\nfuture, it can be helpful to change the formatting into a multi-line string.\n\n\n> std::string sql = \"CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY\nKEY AUTOINCREMENT, name TEXT NOT NULL, email TEXT NOT NULL)\";\n\n\nYou can ask GitLab Duo Chat about it, for example, with the following\nquestion:\n\n\n> How to create a string in C++ using multiple lines?\n\n\nChat may answer with an explanation and optional, a source code example. 
In\nthis context, it can interpret the question to create a C++ string value\nwith multiple lines, for example, using the `\\n` character, assigned to a\nvariable. \n\n\nThe requirement instead is to only format the written code, and variable\nvalue assignment in multiple lines. The string value itself does not need to\ncontain a multi-line string representation. \n\n\nThere is an alternative for additional context in VS Code and the Web IDE:\nSelect the source code in question, right-click, and navigate into `GitLab\nDuo Chat > Refactor`. This opens the Chat prompt and fires the `/refactor`\ncode task immediately.\n\n\nAlthough, the code task might not bring the expected results. Refactoring a\nsingle-line SQL string can mean a lot of things: Use multiple lines for\nreadability, create constants, etc.\n\n\nCode tasks provide an option to refine the prompt. You can add more text\nafter the `/refactor` command, and instruct GitLab Duo Chat to use a\nspecific code type, algorithm, or design pattern. \n\n\nLet's try it again: Select the source code, change focus into Chat, and type\nthe following prompt, followed by `Enter`. \n\n\n> /refactor into a multi-line written string. Show different approaches for\nall C++ standards.\n\n\n![Chat prompt to refactor into a multi-line written string and\nresponse](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image17_aHR0cHM6_1750097645364.png)\n\n\n**Tip:** You can use GitLab Duo Code Suggestions to refine the source code\neven more after refactoring, or use alternative `/refactor` prompt\nrefinements.\n\n\n>/refactor into a multi-line written string, show different approaches\n\n>\n\n> /refactor into multi-line string, not using raw string literals\n\n>\n\n> /refactor into a multi-line written string. 
Make the table name\nparametrizable\n\n\nAn alternative approach with the `stringstream` type is shown in the [GitLab\nDuo Coffee Chat: Refactor C++ functions into OOP classes for abstract\ndatabase handling](https://www.youtube.com/watch?v=Z9EJh0J9358), [MR\ndiff](https://gitlab.com/gitlab-da/use-cases/ai/gitlab-duo-coffee-chat/gitlab-duo-coffee-chat-2024-01-23/-/commit/7ea233138aed46d77e6ce0d930dd8e10560134eb#4ce01e4c84d4b62df8eed159c2db3768ad4ef8bf_33_35). \n\n\n#### Explain vulnerabilities\n\n\nIt might not always work, but the `/explain` slash command can be asked\nabout security vulnerability explanations, too. In this example, the [C\ncode](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts/-/blob/5a5f293dfbfac7222ca4013d8f9ce9b462e4cd3a/chat/slash-commands/c/vuln.c)\ncontains multiple vulnerabilities for strcpy() buffer overflows, world\nwritable file permissions, race condition attacks, and more.\n\n\n> /explain why this code has multiple vulnerabilities\n\n\n![Chat prompt about the code's multiple\nvulnerabilities](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image20_aHR0cHM6_1750097645365.png)\n\n\n#### Refactor C code into Rust\n\n\nRust provides memory safety. You can ask Duo Chat to refactor the vulnerable\n[C\ncode](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts/-/blob/5a5f293dfbfac7222ca4013d8f9ce9b462e4cd3a/chat/slash-commands/c/vuln.c)\ninto Rust, using `/refactor into Rust`. Practice with more refined prompts\nto get better results.\n\n\n> /refactor into Rust and use high level libraries\n\n\n![Chat\nprompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097645366.png)\n\n\n### Shortcuts \n\n\nGive these shortcuts a try in your environment, and practice async using\nGitLab Duo Chat.\n\n\n1. 
Inspect vulnerable code from CVEs, and ask what it does, and how to fix\nit, using `/explain why is this code vulnerable`. \n\n**Tip:** Import open-source projects in GitLab to take advantage of GitLab\nDuo Chat code explanations.\n\n1. Try to refactor code into new programming languages to help legacy code\nmigration plans.\n\n1. You can also try to refactor Jenkins configuration into GitLab CI/CD,\nusing `/refactor into GitLab CI/CD configuration`. \n\n\n### Fun exercises \n\n\nTry to convince Chat to behave like Clippy.\n\n![Chat\nprompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image22_aHR0cHM6_1750097645366.png)\n\n\nAsk about GitLab's mission: \"Everyone can contribute.\"\n\n\n![Chat\nprompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image33_aHR0cHM6_1750097645367.png)\n\n\n### Learn more\n\n\nThere are many different environments and challenges out there. We have\nupdated the [GitLab Duo Chat\ndocumentation](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) with\nmore practical examples, and added a new [GitLab Duo\nexamples](https://docs.gitlab.com/ee/user/gitlab_duo_examples.html) section\nwith deep dives into AI-powered DevSecOps workflows, including Chat.\n\n\n> Want to get going with GitLab Duo Chat? [Start your free trial\ntoday](https://about.gitlab.com/solutions/gitlab-duo-pro/self-managed-and-gitlab-dedicated-trial/).\n",[9,737,496,717],{"slug":3387,"featured":91,"template":693},"10-best-practices-for-using-ai-powered-gitlab-duo-chat","content:en-us:blog:10-best-practices-for-using-ai-powered-gitlab-duo-chat.yml","10 Best Practices For Using Ai Powered Gitlab Duo Chat","en-us/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat.yml","en-us/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat",16,[679,699,724,745,766,786,806,821,842],1758326272436]