[{"data":1,"prerenderedAt":1252},["ShallowReactive",2],{"/en-us/blog/tags/frontend/":3,"navigation-en-us":19,"banner-en-us":449,"footer-en-us":466,"frontend-tag-page-en-us":676},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":10,"_id":12,"_type":13,"title":14,"_source":15,"_file":16,"_stem":17,"_extension":18},"/en-us/blog/tags/frontend","tags",false,"",{"tag":9,"tagSlug":9},"frontend",{"template":11},"BlogTag","content:en-us:blog:tags:frontend.yml","yaml","Frontend","content","en-us/blog/tags/frontend.yml","en-us/blog/tags/frontend","yml",{"_path":20,"_dir":21,"_draft":6,"_partial":6,"_locale":7,"data":22,"_id":445,"_type":13,"title":446,"_source":15,"_file":447,"_stem":448,"_extension":18},"/shared/en-us/main-navigation","en-us",{"logo":23,"freeTrial":28,"sales":33,"login":38,"items":43,"search":376,"minimal":407,"duo":426,"pricingDeployment":435},{"config":24},{"href":25,"dataGaName":26,"dataGaLocation":27},"/","gitlab logo","header",{"text":29,"config":30},"Get free trial",{"href":31,"dataGaName":32,"dataGaLocation":27},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":34,"config":35},"Talk to sales",{"href":36,"dataGaName":37,"dataGaLocation":27},"/sales/","sales",{"text":39,"config":40},"Sign in",{"href":41,"dataGaName":42,"dataGaLocation":27},"https://gitlab.com/users/sign_in/","sign in",[44,88,186,191,297,357],{"text":45,"config":46,"cards":48,"footer":71},"Platform",{"dataNavLevelOne":47},"platform",[49,55,63],{"title":45,"description":50,"link":51},"The most comprehensive AI-powered DevSecOps Platform",{"text":52,"config":53},"Explore our Platform",{"href":54,"dataGaName":47,"dataGaLocation":27},"/platform/",{"title":56,"description":57,"link":58},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":59,"config":60},"Meet GitLab Duo",{"href":61,"dataGaName":62,"dataGaLocation":27},"/gitlab-duo/","gitlab duo 
ai",{"title":64,"description":65,"link":66},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":67,"config":68},"Learn more",{"href":69,"dataGaName":70,"dataGaLocation":27},"/why-gitlab/","why gitlab",{"title":72,"items":73},"Get started with",[74,79,84],{"text":75,"config":76},"Platform Engineering",{"href":77,"dataGaName":78,"dataGaLocation":27},"/solutions/platform-engineering/","platform engineering",{"text":80,"config":81},"Developer Experience",{"href":82,"dataGaName":83,"dataGaLocation":27},"/developer-experience/","Developer experience",{"text":85,"config":86},"MLOps",{"href":87,"dataGaName":85,"dataGaLocation":27},"/topics/devops/the-role-of-ai-in-devops/",{"text":89,"left":90,"config":91,"link":93,"lists":97,"footer":168},"Product",true,{"dataNavLevelOne":92},"solutions",{"text":94,"config":95},"View all Solutions",{"href":96,"dataGaName":92,"dataGaLocation":27},"/solutions/",[98,123,147],{"title":99,"description":100,"link":101,"items":106},"Automation","CI/CD and automation to accelerate deployment",{"config":102},{"icon":103,"href":104,"dataGaName":105,"dataGaLocation":27},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[107,111,115,119],{"text":108,"config":109},"CI/CD",{"href":110,"dataGaLocation":27,"dataGaName":108},"/solutions/continuous-integration/",{"text":112,"config":113},"AI-Assisted Development",{"href":61,"dataGaLocation":27,"dataGaName":114},"AI assisted development",{"text":116,"config":117},"Source Code Management",{"href":118,"dataGaLocation":27,"dataGaName":116},"/solutions/source-code-management/",{"text":120,"config":121},"Automated Software Delivery",{"href":104,"dataGaLocation":27,"dataGaName":122},"Automated software delivery",{"title":124,"description":125,"link":126,"items":131},"Security","Deliver code faster without compromising security",{"config":127},{"href":128,"dataGaName":129,"dataGaLocation":27,"icon":130},"/solutions/security-compliance/","security and 
compliance","ShieldCheckLight",[132,137,142],{"text":133,"config":134},"Application Security Testing",{"href":135,"dataGaName":136,"dataGaLocation":27},"/solutions/application-security-testing/","Application security testing",{"text":138,"config":139},"Software Supply Chain Security",{"href":140,"dataGaLocation":27,"dataGaName":141},"/solutions/supply-chain/","Software supply chain security",{"text":143,"config":144},"Software Compliance",{"href":145,"dataGaName":146,"dataGaLocation":27},"/solutions/software-compliance/","software compliance",{"title":148,"link":149,"items":154},"Measurement",{"config":150},{"icon":151,"href":152,"dataGaName":153,"dataGaLocation":27},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[155,159,163],{"text":156,"config":157},"Visibility & Measurement",{"href":152,"dataGaLocation":27,"dataGaName":158},"Visibility and Measurement",{"text":160,"config":161},"Value Stream Management",{"href":162,"dataGaLocation":27,"dataGaName":160},"/solutions/value-stream-management/",{"text":164,"config":165},"Analytics & Insights",{"href":166,"dataGaLocation":27,"dataGaName":167},"/solutions/analytics-and-insights/","Analytics and insights",{"title":169,"items":170},"GitLab for",[171,176,181],{"text":172,"config":173},"Enterprise",{"href":174,"dataGaLocation":27,"dataGaName":175},"/enterprise/","enterprise",{"text":177,"config":178},"Small Business",{"href":179,"dataGaLocation":27,"dataGaName":180},"/small-business/","small business",{"text":182,"config":183},"Public Sector",{"href":184,"dataGaLocation":27,"dataGaName":185},"/solutions/public-sector/","public sector",{"text":187,"config":188},"Pricing",{"href":189,"dataGaName":190,"dataGaLocation":27,"dataNavLevelOne":190},"/pricing/","pricing",{"text":192,"config":193,"link":195,"lists":199,"feature":284},"Resources",{"dataNavLevelOne":194},"resources",{"text":196,"config":197},"View all 
resources",{"href":198,"dataGaName":194,"dataGaLocation":27},"/resources/",[200,233,256],{"title":201,"items":202},"Getting started",[203,208,213,218,223,228],{"text":204,"config":205},"Install",{"href":206,"dataGaName":207,"dataGaLocation":27},"/install/","install",{"text":209,"config":210},"Quick start guides",{"href":211,"dataGaName":212,"dataGaLocation":27},"/get-started/","quick setup checklists",{"text":214,"config":215},"Learn",{"href":216,"dataGaLocation":27,"dataGaName":217},"https://university.gitlab.com/","learn",{"text":219,"config":220},"Product documentation",{"href":221,"dataGaName":222,"dataGaLocation":27},"https://docs.gitlab.com/","product documentation",{"text":224,"config":225},"Best practice videos",{"href":226,"dataGaName":227,"dataGaLocation":27},"/getting-started-videos/","best practice videos",{"text":229,"config":230},"Integrations",{"href":231,"dataGaName":232,"dataGaLocation":27},"/integrations/","integrations",{"title":234,"items":235},"Discover",[236,241,246,251],{"text":237,"config":238},"Customer success stories",{"href":239,"dataGaName":240,"dataGaLocation":27},"/customers/","customer success stories",{"text":242,"config":243},"Blog",{"href":244,"dataGaName":245,"dataGaLocation":27},"/blog/","blog",{"text":247,"config":248},"Remote",{"href":249,"dataGaName":250,"dataGaLocation":27},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":252,"config":253},"TeamOps",{"href":254,"dataGaName":255,"dataGaLocation":27},"/teamops/","teamops",{"title":257,"items":258},"Connect",[259,264,269,274,279],{"text":260,"config":261},"GitLab 
Services",{"href":262,"dataGaName":263,"dataGaLocation":27},"/services/","services",{"text":265,"config":266},"Community",{"href":267,"dataGaName":268,"dataGaLocation":27},"/community/","community",{"text":270,"config":271},"Forum",{"href":272,"dataGaName":273,"dataGaLocation":27},"https://forum.gitlab.com/","forum",{"text":275,"config":276},"Events",{"href":277,"dataGaName":278,"dataGaLocation":27},"/events/","events",{"text":280,"config":281},"Partners",{"href":282,"dataGaName":283,"dataGaLocation":27},"/partners/","partners",{"backgroundColor":285,"textColor":286,"text":287,"image":288,"link":292},"#2f2a6b","#fff","Insights for the future of software development",{"altText":289,"config":290},"the source promo card",{"src":291},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758208064/dzl0dbift9xdizyelkk4.svg",{"text":293,"config":294},"Read the latest",{"href":295,"dataGaName":296,"dataGaLocation":27},"/the-source/","the source",{"text":298,"config":299,"lists":301},"Company",{"dataNavLevelOne":300},"company",[302],{"items":303},[304,309,315,317,322,327,332,337,342,347,352],{"text":305,"config":306},"About",{"href":307,"dataGaName":308,"dataGaLocation":27},"/company/","about",{"text":310,"config":311,"footerGa":314},"Jobs",{"href":312,"dataGaName":313,"dataGaLocation":27},"/jobs/","jobs",{"dataGaName":313},{"text":275,"config":316},{"href":277,"dataGaName":278,"dataGaLocation":27},{"text":318,"config":319},"Leadership",{"href":320,"dataGaName":321,"dataGaLocation":27},"/company/team/e-group/","leadership",{"text":323,"config":324},"Team",{"href":325,"dataGaName":326,"dataGaLocation":27},"/company/team/","team",{"text":328,"config":329},"Handbook",{"href":330,"dataGaName":331,"dataGaLocation":27},"https://handbook.gitlab.com/","handbook",{"text":333,"config":334},"Investor relations",{"href":335,"dataGaName":336,"dataGaLocation":27},"https://ir.gitlab.com/","investor relations",{"text":338,"config":339},"Trust 
Center",{"href":340,"dataGaName":341,"dataGaLocation":27},"/security/","trust center",{"text":343,"config":344},"AI Transparency Center",{"href":345,"dataGaName":346,"dataGaLocation":27},"/ai-transparency-center/","ai transparency center",{"text":348,"config":349},"Newsletter",{"href":350,"dataGaName":351,"dataGaLocation":27},"/company/contact/","newsletter",{"text":353,"config":354},"Press",{"href":355,"dataGaName":356,"dataGaLocation":27},"/press/","press",{"text":358,"config":359,"lists":360},"Contact us",{"dataNavLevelOne":300},[361],{"items":362},[363,366,371],{"text":34,"config":364},{"href":36,"dataGaName":365,"dataGaLocation":27},"talk to sales",{"text":367,"config":368},"Get help",{"href":369,"dataGaName":370,"dataGaLocation":27},"/support/","get help",{"text":372,"config":373},"Customer portal",{"href":374,"dataGaName":375,"dataGaLocation":27},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":377,"login":378,"suggestions":385},"Close",{"text":379,"link":380},"To search repositories and projects, login to",{"text":381,"config":382},"gitlab.com",{"href":41,"dataGaName":383,"dataGaLocation":384},"search login","search",{"text":386,"default":387},"Suggestions",[388,390,394,396,400,404],{"text":56,"config":389},{"href":61,"dataGaName":56,"dataGaLocation":384},{"text":391,"config":392},"Code Suggestions (AI)",{"href":393,"dataGaName":391,"dataGaLocation":384},"/solutions/code-suggestions/",{"text":108,"config":395},{"href":110,"dataGaName":108,"dataGaLocation":384},{"text":397,"config":398},"GitLab on AWS",{"href":399,"dataGaName":397,"dataGaLocation":384},"/partners/technology-partners/aws/",{"text":401,"config":402},"GitLab on Google Cloud",{"href":403,"dataGaName":401,"dataGaLocation":384},"/partners/technology-partners/google-cloud-platform/",{"text":405,"config":406},"Why 
GitLab?",{"href":69,"dataGaName":405,"dataGaLocation":384},{"freeTrial":408,"mobileIcon":413,"desktopIcon":418,"secondaryButton":421},{"text":409,"config":410},"Start free trial",{"href":411,"dataGaName":32,"dataGaLocation":412},"https://gitlab.com/-/trials/new/","nav",{"altText":414,"config":415},"Gitlab Icon",{"src":416,"dataGaName":417,"dataGaLocation":412},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203874/jypbw1jx72aexsoohd7x.svg","gitlab icon",{"altText":414,"config":419},{"src":420,"dataGaName":417,"dataGaLocation":412},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203875/gs4c8p8opsgvflgkswz9.svg",{"text":422,"config":423},"Get Started",{"href":424,"dataGaName":425,"dataGaLocation":412},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":427,"mobileIcon":431,"desktopIcon":433},{"text":428,"config":429},"Learn more about GitLab Duo",{"href":61,"dataGaName":430,"dataGaLocation":412},"gitlab duo",{"altText":414,"config":432},{"src":416,"dataGaName":417,"dataGaLocation":412},{"altText":414,"config":434},{"src":420,"dataGaName":417,"dataGaLocation":412},{"freeTrial":436,"mobileIcon":441,"desktopIcon":443},{"text":437,"config":438},"Back to pricing",{"href":189,"dataGaName":439,"dataGaLocation":412,"icon":440},"back to pricing","GoBack",{"altText":414,"config":442},{"src":416,"dataGaName":417,"dataGaLocation":412},{"altText":414,"config":444},{"src":420,"dataGaName":417,"dataGaLocation":412},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":450,"_dir":21,"_draft":6,"_partial":6,"_locale":7,"title":451,"button":452,"image":457,"config":461,"_id":463,"_type":13,"_source":15,"_file":464,"_stem":465,"_extension":18},"/shared/en-us/banner","is now in public beta!",{"text":453,"config":454},"Try the 
Beta",{"href":455,"dataGaName":456,"dataGaLocation":27},"/gitlab-duo/agent-platform/","duo banner",{"altText":458,"config":459},"GitLab Duo Agent Platform",{"src":460},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":462},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":467,"_dir":21,"_draft":6,"_partial":6,"_locale":7,"data":468,"_id":672,"_type":13,"title":673,"_source":15,"_file":674,"_stem":675,"_extension":18},"/shared/en-us/main-footer",{"text":469,"source":470,"edit":476,"contribute":481,"config":486,"items":491,"minimal":664},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":471,"config":472},"View page source",{"href":473,"dataGaName":474,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":477,"config":478},"Edit this page",{"href":479,"dataGaName":480,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":482,"config":483},"Please contribute",{"href":484,"dataGaName":485,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":487,"facebook":488,"youtube":489,"linkedin":490},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[492,515,571,600,634],{"title":45,"links":493,"subMenu":498},[494],{"text":495,"config":496},"DevSecOps platform",{"href":54,"dataGaName":497,"dataGaLocation":475},"devsecops platform",[499],{"title":187,"links":500},[501,505,510],{"text":502,"config":503},"View plans",{"href":189,"dataGaName":504,"dataGaLocation":475},"view plans",{"text":506,"config":507},"Why 
Premium?",{"href":508,"dataGaName":509,"dataGaLocation":475},"/pricing/premium/","why premium",{"text":511,"config":512},"Why Ultimate?",{"href":513,"dataGaName":514,"dataGaLocation":475},"/pricing/ultimate/","why ultimate",{"title":516,"links":517},"Solutions",[518,523,525,527,532,537,541,544,548,553,555,558,561,566],{"text":519,"config":520},"Digital transformation",{"href":521,"dataGaName":522,"dataGaLocation":475},"/topics/digital-transformation/","digital transformation",{"text":133,"config":524},{"href":135,"dataGaName":133,"dataGaLocation":475},{"text":122,"config":526},{"href":104,"dataGaName":105,"dataGaLocation":475},{"text":528,"config":529},"Agile development",{"href":530,"dataGaName":531,"dataGaLocation":475},"/solutions/agile-delivery/","agile delivery",{"text":533,"config":534},"Cloud transformation",{"href":535,"dataGaName":536,"dataGaLocation":475},"/topics/cloud-native/","cloud transformation",{"text":538,"config":539},"SCM",{"href":118,"dataGaName":540,"dataGaLocation":475},"source code management",{"text":108,"config":542},{"href":110,"dataGaName":543,"dataGaLocation":475},"continuous integration & delivery",{"text":545,"config":546},"Value stream management",{"href":162,"dataGaName":547,"dataGaLocation":475},"value stream management",{"text":549,"config":550},"GitOps",{"href":551,"dataGaName":552,"dataGaLocation":475},"/solutions/gitops/","gitops",{"text":172,"config":554},{"href":174,"dataGaName":175,"dataGaLocation":475},{"text":556,"config":557},"Small business",{"href":179,"dataGaName":180,"dataGaLocation":475},{"text":559,"config":560},"Public sector",{"href":184,"dataGaName":185,"dataGaLocation":475},{"text":562,"config":563},"Education",{"href":564,"dataGaName":565,"dataGaLocation":475},"/solutions/education/","education",{"text":567,"config":568},"Financial services",{"href":569,"dataGaName":570,"dataGaLocation":475},"/solutions/finance/","financial 
services",{"title":192,"links":572},[573,575,577,579,582,584,586,588,590,592,594,596,598],{"text":204,"config":574},{"href":206,"dataGaName":207,"dataGaLocation":475},{"text":209,"config":576},{"href":211,"dataGaName":212,"dataGaLocation":475},{"text":214,"config":578},{"href":216,"dataGaName":217,"dataGaLocation":475},{"text":219,"config":580},{"href":221,"dataGaName":581,"dataGaLocation":475},"docs",{"text":242,"config":583},{"href":244,"dataGaName":245,"dataGaLocation":475},{"text":237,"config":585},{"href":239,"dataGaName":240,"dataGaLocation":475},{"text":247,"config":587},{"href":249,"dataGaName":250,"dataGaLocation":475},{"text":260,"config":589},{"href":262,"dataGaName":263,"dataGaLocation":475},{"text":252,"config":591},{"href":254,"dataGaName":255,"dataGaLocation":475},{"text":265,"config":593},{"href":267,"dataGaName":268,"dataGaLocation":475},{"text":270,"config":595},{"href":272,"dataGaName":273,"dataGaLocation":475},{"text":275,"config":597},{"href":277,"dataGaName":278,"dataGaLocation":475},{"text":280,"config":599},{"href":282,"dataGaName":283,"dataGaLocation":475},{"title":298,"links":601},[602,604,606,608,610,612,614,618,623,625,627,629],{"text":305,"config":603},{"href":307,"dataGaName":300,"dataGaLocation":475},{"text":310,"config":605},{"href":312,"dataGaName":313,"dataGaLocation":475},{"text":318,"config":607},{"href":320,"dataGaName":321,"dataGaLocation":475},{"text":323,"config":609},{"href":325,"dataGaName":326,"dataGaLocation":475},{"text":328,"config":611},{"href":330,"dataGaName":331,"dataGaLocation":475},{"text":333,"config":613},{"href":335,"dataGaName":336,"dataGaLocation":475},{"text":615,"config":616},"Sustainability",{"href":617,"dataGaName":615,"dataGaLocation":475},"/sustainability/",{"text":619,"config":620},"Diversity, inclusion and belonging (DIB)",{"href":621,"dataGaName":622,"dataGaLocation":475},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":338,"config":624},{"href":340,"dataGaName":341,"dataGaLocation":475},{"text":348,"config":626},{"href":350,"dataGaName":351,"dataGaLocation":475},{"text":353,"config":628},{"href":355,"dataGaName":356,"dataGaLocation":475},{"text":630,"config":631},"Modern Slavery Transparency Statement",{"href":632,"dataGaName":633,"dataGaLocation":475},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":635,"links":636},"Contact Us",[637,640,642,644,649,654,659],{"text":638,"config":639},"Contact an expert",{"href":36,"dataGaName":37,"dataGaLocation":475},{"text":367,"config":641},{"href":369,"dataGaName":370,"dataGaLocation":475},{"text":372,"config":643},{"href":374,"dataGaName":375,"dataGaLocation":475},{"text":645,"config":646},"Status",{"href":647,"dataGaName":648,"dataGaLocation":475},"https://status.gitlab.com/","status",{"text":650,"config":651},"Terms of use",{"href":652,"dataGaName":653,"dataGaLocation":475},"/terms/","terms of use",{"text":655,"config":656},"Privacy statement",{"href":657,"dataGaName":658,"dataGaLocation":475},"/privacy/","privacy statement",{"text":660,"config":661},"Cookie preferences",{"dataGaName":662,"dataGaLocation":475,"id":663,"isOneTrustButton":90},"cookie preferences","ot-sdk-btn",{"items":665},[666,668,670],{"text":650,"config":667},{"href":652,"dataGaName":653,"dataGaLocation":475},{"text":655,"config":669},{"href":657,"dataGaName":658,"dataGaLocation":475},{"text":660,"config":671},{"dataGaName":662,"dataGaLocation":475,"id":663,"isOneTrustButton":90},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":677,"featuredPost":1230,"totalPagesCount":1250,"initialPosts":1251},[678,704,726,746,769,789,811,833,856,875,895,916,936,956,977,997,1021,1044,1066,1088,1108,1130,1150,1170,1191,1211],{"_path":679,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":680,"content":688,"config":697,"_id":700,"_type":13,"title":701,"_source":15,"_file":702,"_stem":703,"_extension":18},"/en-us/blog/efficient-code-review-tips",{"title":681,"description":682,"ogTitle":681,"ogDescription":682,"noIndex":6,"ogImage":683,"ogUrl":684,"ogSiteName":685,"ogType":686,"canonicalUrls":684,"schema":687},"How to carry out effective code reviews","From time management to unblocking, discover the secrets of more efficient code reviews.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678861/Blog/Hero%20Images/pre-commit.jpg","https://about.gitlab.com/blog/efficient-code-review-tips","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to carry out effective code reviews\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Phil Hughes\"}],\n        \"datePublished\": \"2020-09-08\",\n      }",{"title":681,"description":682,"authors":689,"heroImage":683,"date":691,"body":692,"category":693,"tags":694},[690],"Phil Hughes","2020-09-08","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-09-15.\n{: .alert .alert-info .note}\n\nLike most companies, code review at GitLab is a major part of our workflow. But it's clear from the results of our [2020 Global DevSecOps Survey](/developer-survey/) that code review can be a major reason for delayed releases and overall frustration. 
A vast majority of companies conduct code reviews (some even on a daily basis) but that doesn't mean it isn't a potential time sink.\n\n## How to perform a code review?\n\nBut code reviews can be done efficiently, and I know this since I've been a maintainer for 3 years. Here's a look at my top four tips for code review based on a tried and true routine that allows me to do effective code reviews and merge code quickly and efficiently to aid in others not being blocked by me. Of course, this is what works for *me* – your mileage may vary. Here's how I do it:\n\n### Tips for code review no.1 - Time management\n\nAn early start to my day makes it easy to start reviewing merge requests first thing. I set myself a time to start reviewing and I will keep at it until my GitLab \"to do\" list no longer has any merge requests that need reviewing. Mornings work for me; it's the time of the day when I can focus the most and get the reviews done with minimal distractions.\n\nGetting to reviews after this time is hard. I have other work that needs doing as well so once I've reviewed all merge requests on my list I leave anything new until the next day. Of course, as with all rules, it ends up getting broken. **Depending on the size of merge requests, I may make sure I review them before my day ends to make sure anyone in other timezones aren't blocked by me.**\n\n### Tips for code review no.2 - Unblock others first\n\nIt's not great for the author of a merge request to have to wait X hours/days before they get feedback. The sooner they get feedback, the sooner the merge request can be merged and shipped. Making authors wait just creates uncertainty and may mean that other work gets held up.\n\nThis is why I find it important for me to review a merge request as quickly as possible. At GitLab we have a [2  day Service Level Objective (SLO)](/handbook/engineering/workflow/code-review/#review-response-slo) for feedback from reviewers. 
For myself, I always try to do better than that and respond within a day.\n\n### 3. Tips for code review no.3 - Focus on the code, not the feature\n\nThis is going to be a point that could create a lot of discussion: Instead of focusing on the feature, focus on the code.\n\nA lot of the merge requests I review are across different groups, with features that I don't fully understand or with features I have no way to test. I could spend a lot of my time reading into the feature and the issue to understand what it is, but that means spending more time not reviewing everyone else's code. Also, if I did this with **every** merge request, it would be hard for me to keep to my time limit.\n\nWho is better to review the feature itself then? The product designer (UX) or the product manager both understand the feature being worked on and are better suited to help find bugs or guide the feature in the correct way. It is important that someone in the UX team review the feature to make sure it matches the designs and vision they had created for the feature. If a merge request has no UX review by the time I get to reviewing it, I will normally ask the author (or ask a product designer myself) to have the UX reviewed _before_ I merge the merge request.\n\nHowever, this point is also something I don't _always_ stick to. If a merge request is touching an area that I am familiar with and I can tell from the code that a bug exists, I will test it locally and provide as much feedback as I can to help the author understand the bug. The more you – as a reviewer – work with the code, the easier finding bugs through the code becomes. I have been working on the GitLab codebase for over 4 years, so seeing where bugs could arise through looking at the code has become natural to me.\n\n### Tips for code review no.4 - Seek to understand: Ask questions\n\nIt is easy to suggest changes to the code that I am reviewing, however sometimes what I suggest may not be right. 
It is important that instead of just suggesting a change, you always try to ask if the author thinks it is the right change. Having a conversation around a change helps both the reviewer and the author understand the existing code as well as the code being suggested. Maybe the suggestion had already been tried by the author. Being open to talk about it helps get to the final solution.\n\nSometimes however suggestions for changes happen around legacy code, i.e., code that has existed for a long time without being updated to match our documentation. In these cases, the conclusion may end up being that a technical debt issue be created. This is ok. We should strive for [boring solutions](https://handbook.gitlab.com/handbook/values/#boring-solutions) first but also understand that a more optimal solution may be required in the future.\n\n## To sum it up\n\nReviewing code efficiently is a skill that gets learnt the more you do it. Spending time coming up with a workflow that works for you is just as important. Over the years I have been reviewing code, I have stuck to these tips as closely as possible. Yet, I am far from perfect; I am constantly learning about new and different ways to optimise my workflow for code review. I would love to hear other tips and workflows. 
It is through discussions that we can improve and push ourselves to be the best that we can be.\n","insights",[9,695,696],"testing","code review",{"slug":698,"featured":6,"template":699},"efficient-code-review-tips","BlogPost","content:en-us:blog:efficient-code-review-tips.yml","Efficient Code Review Tips","en-us/blog/efficient-code-review-tips.yml","en-us/blog/efficient-code-review-tips",{"_path":705,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":706,"content":712,"config":720,"_id":722,"_type":13,"title":723,"_source":15,"_file":724,"_stem":725,"_extension":18},"/en-us/blog/get-ready-for-commit",{"title":707,"description":708,"ogTitle":707,"ogDescription":708,"noIndex":6,"ogImage":709,"ogUrl":710,"ogSiteName":685,"ogType":686,"canonicalUrls":710,"schema":711},"How to get the most out of GitLab Commit","We’re taking over the Williamsburg neighborhood of Brooklyn and opening up our world to you. Here’s everything you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664134/Blog/Hero%20Images/gitlabcommitbrooklyn.png","https://about.gitlab.com/blog/get-ready-for-commit","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to get the most out of GitLab Commit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily Kyle\"}],\n        \"datePublished\": \"2019-09-13\",\n      }",{"title":707,"description":708,"authors":713,"heroImage":709,"date":715,"body":716,"category":300,"tags":717},[714],"Emily Kyle","2019-09-13","\nWe’re (almost!) ready – are you? The inaugural GitLab Commit in Brooklyn is just around the corner.  We wanted to share some details on the event and how to get the most out of it while you’re onsite and after it’s over.\n\nWhen planning this event we made the strategic choice to avoid convention centers and stuffy hotel ballrooms. 
We like to be a bit more playful (and out of the box) in how we approach our community and events. We decided to model this event after a block party and on Tuesday September 17 we’ll be taking over 8 venues. On your walk from the subway you’ll notice the street has been “GitLabbed” and will be full of signs. Everything – signs, content, staff – will welcome you. If we did it right it will be quirky, fun, innovative, collaborative, and inclusive. A small block in Brooklyn will _be_ GitLab for a day. We can’t wait to share that vision with everyone attending.\n\nStart by checking in at the Williamsburg Hotel, 96 Wythe Ave, Brooklyn, NY 11249\n(between Nassau St. on the G or Bedford Ave. on the L). Don’t forget to grab breakfast treats and coffee.\nOver the course of the day we will also have sessions and activities in and around the following\nvenues: the  [Wythe Hotel](https://wythehotel.com), [Schimanski](https://www.schimanskinyc.com),\n[Brooklyn Bowl](https://www.brooklynbowl.com), [Kinfolk 90](https://kinfolklife.com/locations/kinfolk-90/) & [Kinfolk 94](https://kinfolklife.com/locations/kinfolk-94/). Each venue serves a specific function and has its own personality but each one flows seamlessly into the next one.\n\n![Map of GitLab Commit](https://about.gitlab.com/images/blogimages/gitlabcommitmap.png){: .shadow.small.center.wrap-text}\n\nIt’s a neighborhood takeover!\n{: .note.text-center}\n\n## Remember to schedule\n\nYou can find the schedule [here](https://gitlabcommit2019brooklyn.sched.com). To get the most out of your day on site, we suggest building out your schedule in the sched link just mentioned so you can reserve your slot in each of the tracks. 
There will be 3 tracks – cloud native, DevOps in action, and powered by GitLab – and each will be color coded to help you navigate throughout the day.\n\n## And it’s not over yet…\n\nWe will close out the day of sessions at the historic Brooklyn Bowl directly following the day’s packed lineup for networking, food & beverages and of course bowling. The party kicks off at 5 pm.\n\n## Other important details\n\nLooking for a well brewed cappuccino or latte?  Kinfolk 90 will be serving Commit attendees with a badge from 12pm-5pm at no charge, so drop in between sessions.\n\nHave questions about our product offerings, a nagging support item, want deeper insight into our security offerings, or time to visit with some of our sponsors?  The Library at the Williamsburg Hotel will be open all day for some one-to-one interaction.\n\nLastly we have a [few spots still open](https://about.gitlab.com/events/commit/) if you want to get in on this action packed day of learning! There is also still time to sign up for our [London event](/events/commit/#) in October.\n",[268,718,278,719,9],"contributors","DevOps",{"slug":721,"featured":6,"template":699},"get-ready-for-commit","content:en-us:blog:get-ready-for-commit.yml","Get Ready For Commit","en-us/blog/get-ready-for-commit.yml","en-us/blog/get-ready-for-commit",{"_path":727,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":728,"content":734,"config":740,"_id":742,"_type":13,"title":743,"_source":15,"_file":744,"_stem":745,"_extension":18},"/en-us/blog/gitlab-at-vue-conf",{"title":729,"description":730,"ogTitle":729,"ogDescription":730,"noIndex":6,"ogImage":731,"ogUrl":732,"ogSiteName":685,"ogType":686,"canonicalUrls":732,"schema":733},"GitLab was at VueConf 2017!","GitLab was at VueConf 2017 sharing how we use Vue.js.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682980/Blog/Hero%20Images/cover_image.jpg","https://about.gitlab.com/blog/gitlab-at-vue-conf","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab was at VueConf 2017!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Filipa Lacerda\"}],\n        \"datePublished\": \"2017-06-29\",\n      }",{"title":729,"description":730,"authors":735,"heroImage":731,"date":737,"body":738,"category":300,"tags":739},[736],"Filipa Lacerda","2017-06-29","Last week I attended [VueConf 2017](https://conf.vuejs.org/) explaining how\nwe, at GitLab, changed from [CoffeeScript](http://coffeescript.org/) to\n[EcmaScript 6](http://www.ecma-international.org/ecma-262/6.0/) and how we\nincluded [Vue.js](https://vuejs.org/) in our stack.\n\n\n\u003C!-- more -->\n\n\nVueConf took place in the beautiful city of\n[Wrocław](https://en.wikipedia.org/wiki/Wroc%C5%82aw) in Poland. Props to\n[Monterail](https://www.monterail.com/) for putting together such a\nwell-organized conference.\n\n\nI had the pleasure of meeting all the speakers and organizers and the Vue\ncommunity is inspiring. Everyone is kind and willing to share their\nknowledge. Having Evan You personally introduce everyone says a lot about\nthe spirit of this community.\n\n\nThis conference would not have been possible without the help of the\nsponsors and organizers, thank you [Monterail](https://www.monterail.com/),\n[Codeship](https://codeship.com/), [Monaca](https://monaca.io/), [Native\nScript](https://www.nativescript.org/), [Evan\nYou](https://twitter.com/youyuxi) and [Damian\nDulisz](https://twitter.com/damiandulisz) for organizing such a great\nconference!\n\n\nIn the [slides for my\ntalk](https://filipa.gitlab.io/vue_conf_2017/vue_gitlab_2017.pdf), I guide\nyou through our journey from CoffeeScript to ES6 and from jQuery to Vue.js.\n\n\n## How we use Vue at GitLab\n\n\nAs stated in previous blog posts, we will not rewrite all our code in\nVue.js. 
Instead, we will create several small Vue applications, which is\nsimilar to many small Single Page Applications.\n\n\nIn order to help us with state management, we chose a simple architecture\nand data flow to build our Vue Applications. We have a main Vue component, a\nservice that allows us to get data and a store that saves the data we\nreceive from the service:\n\n\n![architecture-1](https://about.gitlab.com/images/blogimages/gitlab-at-vue-conf/graph_arc_1.png\n\"Vue Application Architecture\")*\u003Csmall>Vue Application Architecture and Data\nFlow.\u003C/small>*\n\n\nWe start by adding an element to the DOM in the haml file, and point to a\nJavaScript file. We take advantage of `data-attributes` to transfer data we\nonly have access in Rails through our Vue application.\n\n\n```html\n  #pipelines-list-vue{ data: {\n    endpoint: namespace_project_pipelines_path(@project),\n    \"help-page-path\" => help_page_path(@project),\n    \"all-path\" =>  project_pipelines_path(@project),\n    \"pending-path\" => project_pipelines_path(@projec),\n    \"ci-lint-path\" => ci_lint_path } }\n\n  = webpack_bundle_tag('common_vue')\n  = webpack_bundle_tag('pipelines')\n```\n\n\nThe next step is to create a bundle file where we are going to mount our\napplication. We can say this is the index file of our application.\n\n\n```javascript\n  import Vue from 'vue';\n  import pipelinesComponent from './pipelines.vue';\n\n  document.addEventListener('DOMContentLoaded', () => {\n    return new Vue({\n      el: '#pipelines-list-vue',\n\n      components: {\n        pipelinesComponent,\n      },\n\n      render(createElement) {\n        return createElement('pipelines-component');\n      },\n    });\n  });\n```\n\n\nWe then need to create our store and our service, they are both simple\nclasses. 
To communicate with our API we use `vue-resource` to help us.\n\n```javascript\n  // store.js\n  export default class PipelinesStore {\n    constructor() {\n      this.state.pipelines = [];\n    }\n    storePipelines(pipelines = []) {\n      this.state.pipelines = pipelines;\n    }\n  }\n```\n\n\n```javascript\n  // service.js\n  import Vue from 'vue';\n  import VueResource from 'vue-resource';\n\n  Vue.use(VueResource);\n\n  export default class PipelinesService {\n    constructor(endpoint) {\n      this.pipelines = Vue.resource(endpoint);\n    }\n    getPipelines(data = {}) {\n      return this.pipelines.get(data);\n    }\n    postAction(endpoint) {\n      return Vue.http.post(`${endpoint}.json`);\n    }\n  }\n```\n\n\nThe next step is to create our main component where we bind everything\ntogether. As soon as the component is created we make a call to the service,\nand if everything goes well, we tell the store to use the received data. If\nwe get an error we simply show a warning to the user.\n\n\nUsually we have several smaller components that are used in the main one,\nthat allows us not only to reuse them but also to have readable files.\n\n\n```vue\n\n\u003Cscript>\n  import Service from 'service';\n  import Store from 'store';\n\n  export default {\n    data() {\n      const dataset = document.querySelector('#pipelines-list-vue').dataset;\n      const store = new Store();\n      const service = new Service(endpoint);\n\n      return {\n        store,\n        service,\n      };\n    },\n    created() {\n      this.service.getPipelines()\n        .then((response) => response.json())\n        .then((pipelines) => this.store.storePipelines(pipelines))\n        .catch((error) => this.handleError(error));\n    },\n  };\n\u003C/script>\n\n\n\u003Ctemplate>\n  \u003Ctable>..\u003C/table>\n\u003C/template>\n\n```\n\nIn some places we have more complex cases where we can’t rewrite it all in\nVue, and we’ll have to use html and jQuery as well.\n\n\nFor example, in 
the Pipelines' details page, only the header and the graph\nare built in Vue.js since are the only ones with real time data.\n\nIf we built this page with the architecture explained above, we would need\nto fetch data from the same endpoint twice, and we need to poll the same\nendpoint twice, which is not a good idea. To avoid duplicate network calls\nwe created a mediator to act as our main component.\n\n\n![architecture-2](https://about.gitlab.com/images/blogimages/gitlab-at-vue-conf/graph_arc_2.png\n\"Vue Application Architecture with a Mediator\")*\u003Csmall>A Mediator allows us\nto reuse the same state between Vue Applications.\u003C/small>*\n\n\nThe mediator not only allows us to avoid duplicate network calls, it also\nallows us to share state between the two Vue Applications and reduce\nrepeated code. It also has the major advantage that can be easily\ntransformed into a Vue main component if needed.\n\n\nYou can read more about our architecture\n[here](https://docs.gitlab.com/ee/development/fe_guide/vue.html#vue-architecture).\nWe have documentation explaining [when to use vue at\nGitLab](https://docs.gitlab.com/ee/development/fe_guide/vue.html#when-to-use-vue-js)\nand how to do it. We also have a small [style guide for our vue\ncode](https://docs.gitlab.com/ee/development/fe_guide/style/javascript.html#vuejs).\n\n\n## Future plans for Vue at GitLab\n\n1. The next step is to make sure all our Vue code looks the same and is\norganized well.\n\n1. Other thing we need to do is to have all components in .vue files. You\ncan see the issue\n[here](https://gitlab.com/gitlab-org/gitlab-ce/issues/34371).\n\n1. We also need to create reusable components. With all new Vue.js code\nbeing added at the same time we ended up with a lot of repeated code in Vue,\nwhich we have identified and are currently transforming into reusable\ncomponents. You can see the issue\n[here](https://gitlab.com/gitlab-org/gitlab-ce/issues/30286).\n\n1. We need a linter. 
Vue is currently the only part of our frontend code\nthat does not have a linter yet, although we have a style guide for Vue.js\nin our documentation. You can see the issue\n[here](https://gitlab.com/gitlab-org/gitlab-ce/issues/34312).\n\n1. We are currently experimenting adding Vuex to our stack to see if it can\nhelp us in more complex areas of our code. The merge request is\n[here](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/12069).\n\n\nHope to see you at the next VueConf! _Na zdrowie!_\n\n\n[Cover\nimage](https://pixabay.com/en/wroc%C5%82aw-lower-silesia-architecture-1663406/)\nby [Przemysław Krzak](https://pixabay.com/en/users/przemokrzak-2778444/) is\nlicensed under [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/)\n",[9,278],{"slug":741,"featured":6,"template":699},"gitlab-at-vue-conf","content:en-us:blog:gitlab-at-vue-conf.yml","Gitlab At Vue Conf","en-us/blog/gitlab-at-vue-conf.yml","en-us/blog/gitlab-at-vue-conf",{"_path":747,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":748,"content":754,"config":763,"_id":765,"_type":13,"title":766,"_source":15,"_file":767,"_stem":768,"_extension":18},"/en-us/blog/gitlab-hashicorp-terraform-vault-pt-1",{"title":749,"description":750,"ogTitle":749,"ogDescription":750,"noIndex":6,"ogImage":751,"ogUrl":752,"ogSiteName":685,"ogType":686,"canonicalUrls":752,"schema":753},"GitLab and HashiCorp streamline delivery workflows","Discover how to leverage CI/CD for your infrastructure scripts with Terraform and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670238/Blog/Hero%20Images/gitlab-terraform-pipelines.jpg","https://about.gitlab.com/blog/gitlab-hashicorp-terraform-vault-pt-1","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and HashiCorp: Providing application and infrastructure delivery workflows\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kelly 
Hair\"},{\"@type\":\"Person\",\"name\":\"Anthony Davanzo\"}],\n        \"datePublished\": \"2019-09-17\",\n      }",{"title":755,"description":750,"authors":756,"heroImage":751,"date":759,"body":760,"category":693,"tags":761},"GitLab and HashiCorp: Providing application and infrastructure delivery workflows",[757,758],"Kelly Hair","Anthony Davanzo","2019-09-17","\nA growing number of teams are becoming more and more invested in continually improving the business through iterative development. Adopting the culture of DevOps isn’t necessarily confined to software development itself, but is equally applicable to ITOps, System Admins, and other infrastructure teams as well. Just as a proper CI/CD workflow is the foundation of today’s application delivery, a similar automated workflow is essential for managing the delivery of infrastructure as well.\n\nAs developers try to become more agile in building, packing, and testing their applications, having the right CI/CD tool that is flexible to other automation use cases is critical. GitLab has gone into great detail about their [flexible CI/CD capabilities here](https://docs.gitlab.com/ee/ci/introduction/index.html#how-gitlab-cicd-works). What’s sometimes overlooked is implementing the proper CI/CD process for the underlying infrastructure that these applications rely on. In addition to application delivery, organizations need to consider what their infrastructure delivery process looks like. GitLab and HashiCorp have partnered to create a multi-blog series on how to combine the application delivery workflow with the infrastructure delivery workflow. In this part we will discuss a high-level overview of the solutions that we will dive deeper into in Part 2.\n\n## Leveraging HashiCorp Terraform for CI/CD Pipelines\n\n[HashiCorp Terraform](https://www.terraform.io/) is an open source tool for provisioning infrastructure as code. 
Users define infrastructure in HashiCorp Configuration Language (HCL) configuration files, Terraform reads those configurations, offers a speculative plan of what it will create, and then users confirm and apply those changes. Terraform keeps track of what infrastructure is provisioned in a state file.\n\nThe recently announced Terraform Cloud application provides users with additional automation and collaboration capabilities on top of Terraform, such as remotely managing and version that state file, executing Terraform runs (plan/apply) remotely, and allowing teams to comment and collaborate on Terraform. By remotely managing state files, Terraform Cloud empowers teams to work more quickly and safely in parallel without concerns of losing the file or overwriting each other's changes. These features are especially helpful for users implementing CI/CD pipelines because they allow users to interact with Terraform via webhooks/API instead of having Terraform run on a local machine.\n\nMost users will store their configuration files in a VCS (Version Control System) like GitLab and connect that VCS to Terraform Cloud. That connection allows users to borrow best practices from software engineering to version and iterate on infrastructure as code, using VCS and Terraform Cloud as a provisioning pipeline for infrastructure. Terraform will automatically run a plan upon changes to configuration files in a VCS. This plan can be reviewed by the team for safety and accuracy in the Terraform UI, then it can be applied to provision the specified infrastructure. Terraform Cloud can also be configured to automatically apply those changes.\n\nTerraform Cloud also includes a Governance upgrade, which provides access to the [Sentinel](https://www.hashicorp.com/sentinel) policy as code framework.  This framework allows users to define fine-grain rules and policies for their infrastructure that are automatically enforced before that infrastructure is provisioned. 
This allows users to work with the speed and efficiency they want in their continuous integration/delivery pipelines, while still ensuring that best practices are being implemented.\n\n### Future iterations\n\nIt is also worth discussing current work in progress with GitLab and Vault. Vault from Hashicorp secures, stores, and tightly controls access to tokens, passwords, certificates, API keys, and other secrets that services depend on. In efforts to improve [Variables and secrets management in GitLab CI/CD](https://gitlab.com/groups/gitlab-org/-/epics/816) we’re working with HashiCorp to provide a [first-class integration with Vault](https://gitlab.com/gitlab-org/gitlab-ce/issues/61053) sometime in the future.\n\n## Next steps\n\nAs a follow up, we will soon be posting a blog on the technical details of _how_ to build a Terraform pipeline in GitLab CI/CD.\n\nIn meantime, check out how [WagLabs reduced their release process from 40 minutes to just six](/blog/wag-labs-blog-post/), using Terraform and GitLab CI/CD!\n\n### About the authors\n\n_[Anthony Davanzo](https://www.linkedin.com/in/anthonydavanzo/) is the product marketing manager for Terraform Cloud at HashiCorp. In this role he focuses on bringing Terraform Cloud to market, hoping to drive adoption and spread awareness of the tool. 
His prior role as the technical product marketing manager for Terraform helps with deep domain knowledge and before HashiCorp, he was a product marketing manager at Cloudflare._\n\n_[Kelly Hair](/company/team/#khair1) is a solutions architect at GitLab._\n\nPhoto by [Saad Salim](https://unsplash.com/@saadx?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[108,762,719,232,9],"cloud native",{"slug":764,"featured":6,"template":699},"gitlab-hashicorp-terraform-vault-pt-1","content:en-us:blog:gitlab-hashicorp-terraform-vault-pt-1.yml","Gitlab Hashicorp Terraform Vault Pt 1","en-us/blog/gitlab-hashicorp-terraform-vault-pt-1.yml","en-us/blog/gitlab-hashicorp-terraform-vault-pt-1",{"_path":770,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":771,"content":776,"config":783,"_id":785,"_type":13,"title":786,"_source":15,"_file":787,"_stem":788,"_extension":18},"/en-us/blog/gitlab-series-e-funding",{"title":772,"description":773,"ogTitle":772,"ogDescription":773,"noIndex":6,"ogImage":709,"ogUrl":774,"ogSiteName":685,"ogType":686,"canonicalUrls":774,"schema":775},"Announcing $268 million in Series E funding","New funding and our $2.75 billion valuation will allow us to enhance monitoring and security capabilities.","https://about.gitlab.com/blog/gitlab-series-e-funding","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Announcing $268 million in Series E funding\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2019-09-17\",\n      }",{"title":772,"description":773,"authors":777,"heroImage":709,"date":759,"body":779,"category":300,"tags":780},[778],"GitLab","We’re excited to share that GitLab has completed a $268 million Series E round of fundraising that pushed the company’s valuation to $2.75 billion. 
This latest funding round was led by existing investors Goldman Sachs and ICONIQ, but also included participation from nine new-to-GitLab investors.\n\nOur plans for the funding are straightforward: GitLab will invest to make all of our [DevOps platform](/topics/devops-platform/) offerings, including monitoring, security, and planning, _best in class_ so we can enable our [enterprise customers](/enterprise/) to continue to bring products to market faster.\n\nAt a time when the DevOps tools market is expected to triple by 2023 (from $5.2 billion last year to $15 billion, according to IDC), it was clear there was an opportunity for our company to pursue additional funding.\n“To be competitive today, companies need to be 10x faster to market. We made an early bet that enterprises would benefit from a single application experience for DevOps teams to accelerate getting software products to market faster and more securely,” says CEO [Sid Sijbrandij](/company/team/#sytses). “I love hearing how our customers are innovating faster with a single DevOps application that enables Dev, Ops, and Security to collaborate, and this funding will help more organizations experience the benefits of this unified DevOps experience.”\n\nToday more than 100,000 organizations use GitLab, including Ask Media Group, Charter Communication, Delta Air Lines, Goldman Sachs, Ticketmaster, Nvidia, and [many more](/customers/). We just found out we were ranked 32nd in the [Forbes 2019 Cloud 100](https://about.gitlab.com/2019-09-11-gitlab-named-leader-in-forbes-cloud-100-list/) – and we were the only cloud-agnostic DevOps tool maker named! Our ARR (annual recurring revenue) growth rate is 143%, a sign of customer satisfaction and strong demand.\n\n## A fast pace\n\nThis latest fundraising effort happened less than a year after we announced our [Series D round of $100 million](/blog/announcing-100m-series-d-funding/). 
At that time the company was valued at $1.1 billion; with today’s announcement, our valuation has more than doubled in less than a year.\n\nIt’s been an amazing journey to get to this point, and it’s worth remembering where we came from. In 2015 fewer than 10 people worked at GitLab; today over 800 team members contribute from 55 countries around the world. And we’re still growing, as our [222 open positions](/jobs/) show. More than 4,800 people actively contribute code to GitLab, and we receive an average of 180 improvements to each monthly release. In March 2019 we had [one million merge requests](/blog/1-mil-merge-requests/), which was a milestone indeed.\nWe’re on this journey together and we couldn’t be more excited to see where it takes us. Today you’ll find us at our first ever user conference, [GitLab Commit](/events/commit/), in Brooklyn and then again in London on October 9. We’re looking forward to the inspiring customer stories that have made this all possible.\n\nThe funding was announced live in the [keynote of GitLab Commit Brooklyn](https://www.youtube.com/watch?v=6LrgxOfWMXA&list=PLFGfElNsQthaaqEAb6ceZvYnZgzSM50Kg), also see [the playlist of all talks that day](https://www.youtube.com/playlist?list=PLFGfElNsQthaaqEAb6ceZvYnZgzSM50Kg).",[781,268,278,782,9],"news","features",{"slug":784,"featured":6,"template":699},"gitlab-series-e-funding","content:en-us:blog:gitlab-series-e-funding.yml","Gitlab Series E Funding","en-us/blog/gitlab-series-e-funding.yml","en-us/blog/gitlab-series-e-funding",{"_path":790,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":791,"content":797,"config":805,"_id":807,"_type":13,"title":808,"_source":15,"_file":809,"_stem":810,"_extension":18},"/en-us/blog/gitlab-vue-one-year-later",{"title":792,"description":793,"ogTitle":792,"ogDescription":793,"noIndex":6,"ogImage":794,"ogUrl":795,"ogSiteName":685,"ogType":686,"canonicalUrls":795,"schema":796},"How we do Vue: one year later","How we, at GitLab, write VueJS, one year 
later.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680321/Blog/Hero%20Images/vue-title.jpg","https://about.gitlab.com/blog/gitlab-vue-one-year-later","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we do Vue: one year later\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Schatz\"}],\n        \"datePublished\": \"2017-11-09\",\n      }",{"title":792,"description":793,"authors":798,"heroImage":794,"date":800,"body":801,"category":802,"tags":803},[799],"Jacob Schatz","2017-11-09","It's been a while since [we wrote about Vue](/blog/why-we-chose-vue/). We've\nbeen using Vue for over a year now and life has been very good. Thanks\n[@lnoogn](https://twitter.com/lnoogn) for reminding me to write this\narticle!\n\n\n\u003C!-- more -->\n\n\nOur situation reminds me of a quote about Scala from [\"Is Scala slowly\ndying?\"](https://www.reddit.com/r/scala/comments/2hw0bp/is_scala_slowly_dying/)\nSomeone once said:\n\n\n> Scala people don't have time for redditing and blogging, they're busy\ngetting crap done.\n\n\nWhich is exactly what we've been doing. Like Scala, Vue works really, really\nwell, when used properly. It turns out Vue isn't a buzzword, Vue is a\nworkhorse. A lot of our problems have been solved, by us and others. We\nstill have problems but, we now have a reproducible \"way to write Vue.\" We\ndon't adopt every new idea out there, but we have changed a few things since\nwe last spoke.\n\n\nSince that last post, we published a [very extensive Vue style\nguide](https://docs.gitlab.com/ee/development/fe_guide/vue.html), after\nwhich Vue also put out a [style guide](https://vuejs.org/v2/style-guide/),\n[taking inspiration from\nours](https://github.com/vuejs/eslint-plugin-vue/issues/77#issuecomment-315834845).\nThe style guide has been updated several times as we discover better ways to\nwrite Vue. 
Here are some of the things we discovered.\n\n\n## Just use VueX\n\n\nWe discovered that [VueX](https://vuex.vuejs.org/) makes our lives easier.\nIf you are writing a medium to large feature, use VueX. If it's a tiny\nfeature, you might get away without it. We made the mistake of not using\nVueX for a large feature. We wrote a [multi-file\neditor](https://gitlab.com/gitlab-org/gitlab-ce/issues/31890) (WIP) to\nreplace our current repo file view, to allow easy editing of multiple files.\n\n\n![multi-file-editor.png](https://about.gitlab.com/images/vue_2017/multi-file-editor.png){:\n.shadow}\n\n\nIn the beginning we did not use VueX for this feature and instead used the\nstore pattern. The Vue docs talk about the [store\npattern](https://vuejs.org/v2/guide/state-management.html#Simple-State-Management-from-Scratch),\nwhich works well when you are committed to strictly keeping to the pattern.\nWe've found that you are better off spending your time with VueX instead.\nWhile VueX is initially more verbose, it is much more scalable, and will\nsave you tons of time in the long run. Our mistake happened when we changed\nthe data in multiple places. In VueX you are forced to change the data in\none central place. If you don't do this, you will wind up chasing unexpected\nbugs around.\n\n\n## Write high quality code\n\n\nEven though VueJS and VueX are both wonderful, it is still possible (as with\nany code) to write bad Vue code. While the code may work, your longevity and\nscalability may suffer. Performance can suffer. With Vue, it makes it so\neasy to have what seems like working, perfect code because Vue is so simple\nto write. Longevity problems can mean that your code initially works, but\nyou (and others) will have a hard time trying to update the code.\nPerformance problems might not crop up with small data sets, but will with\nlarger ones. Code can get messy. Your code can get smelly. 
Yes, even with\nVue, you can have [code smell](https://en.wikipedia.org/wiki/Code_smell).\n\n\nWhen you add something to the `data` object or the `store` for Vue to keep\ntrack of, Vue will recursively walk down your data object and keep track of\neverything. If your data is super hierarchical and just large in general,\nand you are changing things often (like maybe on `mousemove`), then you can\ncreate jank. It's not bad to have Vue observe large data sets, but just\nconfirm that you do in fact need the data you are watching to be reactive.\nIt's easy with Vue to just make everything reactive, when it might not need\nto be.\n\n\nThat's why we are very strict when anyone writes Vue code. They must [follow\nour\ndocumentation](https://docs.gitlab.com/ee/development/fe_guide/vue.html).\nThey must also only write Vue when it is necessary and not write it [when it\nis\noverkill](https://docs.gitlab.com/ee/development/fe_guide/vue.html#when-not-to-use-vue-js).\n\n\nAll of our new Vue code follows the [Flux\narchitecture](https://facebook.github.io/flux/). VueX also follows Flux,\nwhich is part of the reason we use VueX. You can use the previously\nmentioned \"store pattern,\" but VueX is a better choice because it enforces\nall of the rules. If you go rogue, you will wind up enforcing the rules\nyourself, and you will probably make mistakes. The less you put on your\nplate, the better. A good example of a well-written Vue app is the [registry\nimage list](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/14303).\n\n\n### I want to use jQuery with Vue\n\n\nDuring new development, this question kept popping up.\n\n> Is it ever OK to mix jQuery with VueJS?\n\n\nWe are not talking about using [Select2](https://select2.org/), which is a\njQuery library. We are talking about the need to query the DOM. 
We had\ndiscussions about using jQuery and the following was proposed:\n\n\n> Using jQuery is OK, but only for querying.\n\n\nAt first I had several discussions about using jQuery with Vue. Some had\nsaid it might be OK, but only in read-only (querying) situations. However,\nafter doing the research, we found that it is **not** a good idea to use\njQuery with Vue. There will always be a better solution. We found that if\nyou ever find yourself needing to query to DOM within a Vue architecture,\nthen you are doing something wrong.\n\n\nIf one were to hypothetically use jQuery for only the tiniest querying\nsituations, one would have to quantify those situations. You should instead\nswear off querying the DOM when in Vue.\n\n\nInstead of querying, you will find that using the `store` in combination\nwith the server-side code is usually a much simpler answer. The server can\nprovide validity to your data that you cannot provide on the client side.\nFor the most part, we find that the less we have to fool with the data on\nthe client side the better. That's not to say it's never OK to modify the\ndata on the client side, but that it isn't usually the cleanest solution. At\nGitLab we use querying only to grab endpoints from the `data` attribute of\nour main element, but we don't use jQuery, we use `el.dataset`. At GitLab,\nwe (the Frontend people) talk with the Backend people to ensure the\nstructure of the data we will be consuming. In that way, both the Frontend\nteam and the Backend team can be in control.\n\n\n#### Example situation:\n\n\nCheck out this issue:\n\n\n![issue](https://about.gitlab.com/images/vue_2017/issue.png){: .shadow}\n\n\nWe now render all issue comments in Vue. An example of a situation where we\nwanted to use jQuery was during the rewrite of the\nedit-the-last-user-comment feature. 
When someone presses that `up` key on\ntheir keyboard from an empty new comment `textarea` (at the very bottom of\nthe page) we allow them to edit the last comment they created, just like in\nSlack. Not just the last comment, but the last comment *they created*. We\nmarked the last user comment in the picture in red. Of course there is a\ntime crunch. Then someone might say,\n\n\n> Can't we just do a quick solution here and fix it later?\n\n\nSurely you *could* query the DOM for this. A better solution, in this case,\nis to let the backend developers mark the last user comment in the JSON they\nreturn. Backend developers have direct access to the database, which means\nthey may be able to optimize the code. Then no client-side work has to be\ndone at all, in this case. Someone has to do the work to mark the last user\ncomment. In this case the solution is just finding the right person for the\njob. Once you have that data from the server, the comment is in your\n`store`, ready for your easy access. You can do anything now. The world is\nyour oyster.\n\n\nIf you find yourself querying the DOM, \"just this one time\" 😉, there is\nalways a better solution.\n\n\n### The proper Vue app\n\n\nEvery Vue bundle needs one store, one service, and always has one entry\npoint. Your entry point component is the only container component and every\nother component is presentational. All this information is in our Vue docs.\n\n\nYou can start out with a single `div`.\n\n\n```html\n\n\u003C!--HAML-->\n\n.js-vue-app{ data: { endpoint: 'foo' }}\n\n\n\u003C!--HTML-->\n\n\u003Cdiv class=\"js-vue-app\" data-endpoint=\"foo\">\u003C/div>\n\n```\n\nYou can pass your endpoints in through the data attributes. Vue can then\ncall these endpoints with an HTTP client of your choice.\n\n\nYou don't want to do any URL building in client-side JavaScript. Make sure\nyou pass in all your server-built URLs through endpoints. 
When writing Vue\nit's important to let the server do what it should.\n\n\n## Improve performance\n\n\nWe recently rewrote our issue comments in Vue. The issue comments were\npreviously written in Haml, jQuery, and Rails. We had a bottleneck because\nwe were not loading the comments asynchronously. A quick solution is to load\ncomments via ajax and populate comments after the page loads. One way to\nmake a page load faster is to not block the page with heavy items and load\nthem after.\n\n\n![comments.png](https://about.gitlab.com/images/vue_2017/comments.png){:\n.shadow}\n\n\nWhat we love is that one day we turned on the new comments and some people\ndidn't know that we had refactored it. As a result of the refactor our issue\npages load much faster, and there is less jank.\n\n\nLoading the comments on the issue page is now streamlined and now individual\nissues load much faster. In the past, an issue page could have tens of\nthousands of event listeners. Our previous code was not properly removing\nand keeping track of event listeners. Those massive event listeners (along\nwith other problems) created jank, so scrolling the page was choppy with\nmany comments. We removed jQuery and added in Vue and focused on improving\nthe performance. You can clearly see and feel that the page is much faster.\nHowever, our work to improve the performance has just begun. This rewrite\nsets the foundation for performance improvements that are easier to write,\nbecause the code is much more maintainable. Previously the code was hard to\nmaintain. 
Now the issue comments code is properly separated and\n\"componentized.\"\n\n\nWith these new improvements, as well as other parallel improvements, e.g.\nloading images on scroll, we were able to make the page load and perform\nfaster.\n\n\n![speed.png](https://about.gitlab.com/images/vue_2017/speed.png){: .shadow}\n\n\nRefactoring is that word that a new, super-green developer mentions on day\none when they suggest to rewrite everything in Angular. That hasn't happened\nat GitLab. Our frontend devs tend to be very conservative, which is a very\ngood thing. Which begs the question, why does it seems like [everyone is\nalways refactoring](https://reasonml.github.io/community/blog/#reason-3)?\nWhat are they trying to achieve? I can only speak for GitLab. What do we\nwant to achieve with a refactor? In reality it's going to cost a lot of\nmoney. The costs are:\n\n\n1. Cost of doing the refactoring.\n\n1. Cost of testing the change.\n\n1. Cost of updating tests and documentation.\n\n\nYou also have more risk:\n\n\n1. Risk of introducing bugs.\n\n1. Risk of taking on a huge task that you can't finish.\n\n1. Risk of not achieving the quality/improvements you intended.\n\n\nOur goals are:\n\n\n**Goal #1**: Make the code more maintainable. We want to make the process of\nadding new features easier. In the long term this refactor will save us\ntime, but it takes a significant amount of time to recoup the time spent\nrefactoring. The hard truth may be that a refactor usually does not save you\ntime, but can save you stress.\n\n\n**Goal #2**: What it can do, if done right, is make developers happy.\nNothing gives your team more horsepower than a happy, excited coder. A\nstressed-out coder will want to stop coding; an excited coder will not want\nto stop. A happy coder saves the most time.\n\n\nTo meet our goal our next step is to refactor the merge request comments\nsection. Our merge request comments are massively slow for merge requests\nwith lots of comments. 
The comments become slower and start to be less\nresponsive at around 200 comments. The diffs are slow as well. There are a\nton of reasons for this, one of which is that JavaScript is causing multiple\nreflows that take tons of time. We could refactor this and have already put\nin a fix, but this isn't a long-term solution.  In the case of a huge MR,\nthere was code that was causing a reflow that [takes over eight\nseconds](https://gitlab.com/gitlab-org/gitlab-ce/issues/39332)! This is now\nfixed. In this\n[image](https://gitlab.com/gitlab-org/gitlab-ce/uploads/e18856a1544d4d0e6420d11fd0479af7/ss__2017-10-20_at_1.41.04_PM.png) \nyou can see there is other stuff slowing things down. Clearly there is a lot\nof work to do here. Our biggest problem is that the code is not\nmaintainable, which means that fixes take longer. A refactor into Vue will\nprovide some great initial speed improvements, and lay the groundwork for\neasier improvements in the future.\n\n\nThere is so much work to do at GitLab. 
If you want to be a part of exploring\nthe massive catacombs of GitLab and writing awesome code and if you are\ninterested in helping out our Frontend team, then\n[apply](https://handbook.gitlab.com/job-families/engineering/development/frontend/).\n","engineering",[9,804],"inside GitLab",{"slug":806,"featured":6,"template":699},"gitlab-vue-one-year-later","content:en-us:blog:gitlab-vue-one-year-later.yml","Gitlab Vue One Year Later","en-us/blog/gitlab-vue-one-year-later.yml","en-us/blog/gitlab-vue-one-year-later",{"_path":812,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":813,"content":819,"config":827,"_id":829,"_type":13,"title":830,"_source":15,"_file":831,"_stem":832,"_extension":18},"/en-us/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow",{"title":814,"description":815,"ogTitle":814,"ogDescription":815,"noIndex":6,"ogImage":816,"ogUrl":817,"ogSiteName":685,"ogType":686,"canonicalUrls":817,"schema":818},"How DevOps and GitLab CI/CD enhance a frontend workflow","The GitLab frontend team uses DevOps and CI/CD to ensure code consistency, fast delivery, and simple automation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679026/Blog/Hero%20Images/frontendworkflow.jpg","https://about.gitlab.com/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How DevOps and GitLab CI/CD enhance a frontend workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"José Iván Vargas\"}],\n        \"datePublished\": \"2018-08-09\",\n      }",{"title":814,"description":815,"authors":820,"heroImage":816,"date":822,"body":823,"category":802,"tags":824},[821],"José Iván Vargas","2018-08-09","\nIt might seem like a lot of what we do on frontend is to make our lives easier,\nbut what I’ve learned in the past two years as a GitLab team-member and a community contributor\nis that if we make our lives easier, 
we can make a lot of customers happier, too.\nOver the years, I’ve experienced many changes at GitLab, from a change in processes\nto an increase in team members. From an early stage, the frontend team has been\ncommitted to continuous improvements, but working in a rapidly growing team\nrequired an investment in the way we work.\n\nWhen I joined GitLab we still used some of the default conventions that the [Rails\nframework](/blog/upgrade-to-rails5/) recommended for the frontend, and it helped us for quite a while, but\nthe more code we touched, the more code we needed to test and build for\nperformance, making it more challenging for us to maintain. The frontend team\nrealized that we needed a way to facilitate code consistency, fast delivery, and\nsimple automation, so we decided to incorporate [DevOps](/topics/devops/) and\n[CI/CD](/solutions/continuous-integration/) into our workflow.\n\n## Frontend DevOps and CI/CD workflow\n\nWe used CI in a few scenarios, including using linters to help write a consistent\nstyle of code throughout GitLab, but in the case of our JavaScript code, we\nrealized that building for performance and maintainability was becoming\nincreasingly difficult. So, we moved away from the\n[asset pipeline and utilized webpack](/blog/vue-big-plan/),\nwhich has given us a series of benefits. For example,  when we develop locally,\ndebugging code is now a breeze, and the jobs that are frontend related run on\nproduction-bundled code, ensuring a testing environment that closely resembles\nthat of a user.\n\nAfter CI, we publish code using DevOps by hosting it with\n[GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/)). 
We’ve seen several projects benefit from\nadopting a DevOps model, including\n[GitLab SVG libraries](https://gitlab.com/gitlab-org/gitlab-svgs) and\n[Trello Power-Up](https://docs.gitlab.com/ee/integration/trello_power_up.html).\n\nWhen we created GitLab SVG libraries, we wanted to use them for ourselves and\nmake them available to the general public, so whenever we publish a new version,\nwe use GitLab Pages so that it’s fully automated every time.\n\nWith the Trello Power-Up plugin, we use DevOps to address compatibility\nissues when a new version of Trello is released. GitLab Pages makes it easy to\ndeploy a new version, in a fast and diligent manner, so that it’s accessible in\nthe Trello Marketplace as quickly as possible.\n\n## Frontend DevOps and Data-driven efforts\n\nIncorporating frontend DevOps and CI/CD into the workflow has had a significant\nimpact on efficiency and results. We have greater insight into our operations\nand have metrics to help us detect major areas of improvement. We set up\n[Sitespeed](https://www.sitespeed.io) using Kubernetes to analyze sets of pages\nand provide reports on anything that could hamper our users’ perceived\nperformance, from CSS and JavaScript bundle sizes to accessibility issues and\nthe render time differences between various points in time. The information we gathered using\nSitespeed has helped us improve the merge requests page and identify pages that\nrender slowly. Having more data has changed the way we approach problems at\nGitLab, because we are able to focus our efforts on specific areas.\n\n## The unexpected discovery of problems\n\nOne of the unexpected benefits of our workflow is the discovery of problems that\nwe may not have identified.\n\n### A lack of automation\n\nWe realized, for example, that we lack some automation in our tools. 
For\ninstance, every time we didn’t format code in a specific way, our linter\nnotified us, but analyzing and fixing the code slowed down developer velocity,\nso we decided to add [Prettier](https://prettier.io/) to format our code in our\nmerge requests for us. We also realized that, sometimes, we need a little bit of\nautomation when we publish code. As an all-remote company, many of us work on\npublic WiFi, and we found that unreliable connections could have detrimental\neffects while deploying code. The combination of CI and DevOps made deployments\neasier. If we triggered a pipeline and a coffee shop WiFi goes vamoose, it\ndoesn't matter. We already automated a significant part of our development\nprocess, but we’re always striving for more.\n\n### A lack of speed\n\nIn the case of CI, we noticed that our own tools can be a source of problems. We\nfound that we didn’t make the necessary considerations to keep our test suite fast.\nAs developers, we want to go back to developing as fast as possible. A few of my\nteammates discovered that our test runs were becoming slower and slower with each\nrelease. Even though these are not customer-facing changes, it has made both\nproduct managers and team managers consider investing in those issues, because\nthe easier the development cycle is for the developers involved, the better it\nis for our customers, since we can deliver even more features. Furthermore, we\ncan prevent regressions from happening by having solid foundations, such as\ntesting, code style, and code formatting.\n\nEvery time we discover problems that affect us or our work, we realize that we\ncan also jeopardize the features and experiences we want to deliver to our\ncustomers. 
It has changed the culture inside the team, because we view\nperformance issues as developers rather than as GitLab team-members.\n\n## Advice to frontend teams\n\nUsing DevOps and CI/CD in a frontend workflow is compatible with teams of any\nsize, including small teams that may want to ensure that their code styling is\nthe same.\n\n### Put a linter in place\n\nWith CI, the smallest and perhaps one of the most significant steps is\nto put a linter in place, and if the pipeline doesn't pass, you can’t merge the\ncode. That's such a simple, effective way to improve your code and to keep it\ntidy and clean in the long run. Just setting up some simple steps using CI will\nimprove your team’s code and your developers’ quality of life so that they don't\nhave to worry about combing through past code. Even though small teams might not\nfind the value in the short term, when they scale, they certainly will.\n\n### Create consistent scenarios\n\nThe bigger the project, the more you realize that some of your tooling ends up\nrunning locally, and it's beneficial to run it on CI. If something doesn't work\non a generic type of machine that has enough dependencies installed to run your\nCI setup, that means there’s something wrong and that you should probably fix it\nbefore merging your code. As long as you can create a consistent scenario in which\nyou can do things like testing and linting, you should be in a good position to\ndeliver a great product.\n\n### Select CI-compatible tools\n\nFor teams of all sizes, it’s important that the tools you select as part of your\nworkflow are compatible with CI in some way, so that even if you had a big part\nof your workflow running locally, you can easily move to CI by creating a pipeline\nthat resembles that of your daily workflow. Regardless of the tool that you choose,\ncreating a job for it will return a lot of value in the long run. 
If it makes\nsense, I encourage you to add it, because there’s very little incentive not to.\nCI-compatible tools include tests runners, linters, Prettier, or any custom-made\ntools that help you in some way. One decision you might want to avoid is creating\non servers that live on CI runners. Since they only run for a limited amount of\ntime, these servers will stop existing. You could also add deployments to your\nCI workflow, helping you with DevOps and preventing you from worrying about\ncomplicated local setups for new developers. The possibilities are huge.\n\n### Add performance testing\n\nTo add to the pool of possibilities, why not add performance testing to your\nmerge requests with a tool such as\n[Lighthouse](https://developers.google.com/web/tools/lighthouse/), which can\nhelp you understand potential performance bottlenecks in your website. Or, maybe\nyour team can add the ability to generate code documentation and publish it via\nGitLab Pages. CI/CD can be a really good tool, because it will return something\nimmediately. It's just a matter of how you want to use it, depending on your needs.\n\nThe more the frontend team uses CI and DevOps, the more we discover ways to use\nit, so it’s worth it to us to invest in this tool.\n\nSometimes, we just want to\nget stuff out there without too much consideration for tooling and CI and CD,\nbut because of the benefits we’ve experienced, we now include CI/CD in all of\nour projects. With GitLab, everything is integrated, so why skip it? 
Instead of\nfighting against automation, I encourage teams to embrace the idea that CI is\nthere to help you.\n\n[Cover image](https://unsplash.com/photos/UbGqwmzQqZM) by\n[Zhipeng Ya](https://unsplash.com/photos/UbGqwmzQqZM?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText), licensed\nunder [CC X](https://unsplash.com/license).\n{: .note}\n",[9,825,826,719],"workflow","CI",{"slug":828,"featured":6,"template":699},"how-devops-and-gitlab-cicd-enhance-a-frontend-workflow","content:en-us:blog:how-devops-and-gitlab-cicd-enhance-a-frontend-workflow.yml","How Devops And Gitlab Cicd Enhance A Frontend Workflow","en-us/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow.yml","en-us/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow",{"_path":834,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":835,"content":841,"config":850,"_id":852,"_type":13,"title":853,"_source":15,"_file":854,"_stem":855,"_extension":18},"/en-us/blog/how-i-transitioned-from-frontend-to-ux",{"title":836,"description":837,"ogTitle":836,"ogDescription":837,"noIndex":6,"ogImage":838,"ogUrl":839,"ogSiteName":685,"ogType":686,"canonicalUrls":839,"schema":840},"How I transitioned from frontend to UX","One GitLab team-member shares how switching from a frontend engineer to a UX designer has been a rewarding experience.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679015/Blog/Hero%20Images/frontendux.jpg","https://about.gitlab.com/blog/how-i-transitioned-from-frontend-to-ux","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How I transitioned from frontend to UX\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Annabel Dunstone Gray\"}],\n        \"datePublished\": \"2018-10-05\",\n      }",{"title":836,"description":837,"authors":842,"heroImage":838,"date":844,"body":845,"category":846,"tags":847},[843],"Annabel Dunstone Gray","2018-10-05","\nWhen I joined GitLab 
over two and a half years ago as a frontend engineer, I brought with\nme a background in photography and an interest in art and design. In my last year\nof university, I worked at an art museum, and I’ve always gravitated towards the\nmore design-y aspects of frontend. For each release, my assigned deliverables\nwere usually focused on redesigns, and while I enjoy that type of work, what I\nreally wanted to do was to help shape the look and feel of GitLab, rather than\nimplementing the designs of others.\n\n## Making the first move\n\nAt GitLab, we're lucky to have the opportunity to [transfer](/handbook/people-group/promotions-transfers/#department-transfers)\nto a different department, if our interests or career goals change. I spoke with\nmy frontend manager about my passions and shared my desire to start learning and\nworking with the UX team. I then spoke with [Sarrah](/company/team/#SVesselov),\nthe UX Manager, about the next steps, and I started working through online\ntutorials, getting up to speed on Sketch, and attending the UX weekly calls.\nOnce I acquired the necessary technical skills, I joined the [Plan](/direction/#plan)\nteam, which is focused mostly on the prioritization of ideas, allocation of\nresources, scheduling, and tracking. It’s an area I’m really excited about, and\nwe’re working on some incredibly useful management features (like [improved issue boards](https://gitlab.com/gitlab-org/gitlab-ce/issues/48847), [sub-epics](https://gitlab.com/gitlab-org/gitlab-ee/issues/7327), and [value stream management](https://gitlab.com/groups/gitlab-org/-/epics/229)) that will help make\nGitLab an even more powerful tool.\n\nAs a frontend engineer, I was fortunate to have developed many transferable\nskills which helped me tackle this new challenge. Attention to detail is one\nskill that has been particularly useful when working on a new feature. 
Since\nI’m new to UX, I’ve found it really helpful to have a technical background,\nespecially considering that GitLab is such a technical product.\n\n## Advice to others\n\n![Me and my daughter attending a frontend meeting.](https://about.gitlab.com/images/blogimages/annabelandbaby.jpg){:.shadow.small.right.wrap-text}\n\nIf you’re interested in making a similar transition, I encourage you to speak\nwith your manager. I wish I’d done so sooner. I discussed my interests early\nlast year, but after having a baby, I had this idea that I\nshould stay in my current role, as I would never have time to learn a whole new\npractice. While I definitely don’t have any free time (I don’t know if you’ve\nheard – babies are quite time consuming), I’m so happy to be on the UX team, even\nthough I have a lot of catching up to do. Everyone in both frontend and UX has\nbeen incredibly supportive of my switching teams, and I’m learning a lot as I go\nalong. For now, I’ve got the best of both worlds – 50 percent of my time is focused on\nstyling-related frontend issues and reviewing the CSS in merge requests, while\nthe other 50 percent is working on UX issues.\n\nBy the way, we're hiring for loads of positions, across the company – [check out our current job openings](/jobs/).\n\n[Cover image](https://unsplash.com/photos/aLGiPJ4XRO4) by [Bharath](https://unsplash.com/@xen0m0rph), licensed under [CC X](https://unsplash.com/license).\n{: .note}\n","culture",[848,9,849,804],"UX","careers",{"slug":851,"featured":6,"template":699},"how-i-transitioned-from-frontend-to-ux","content:en-us:blog:how-i-transitioned-from-frontend-to-ux.yml","How I Transitioned From Frontend To 
Ux","en-us/blog/how-i-transitioned-from-frontend-to-ux.yml","en-us/blog/how-i-transitioned-from-frontend-to-ux",{"_path":857,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":858,"content":864,"config":869,"_id":871,"_type":13,"title":872,"_source":15,"_file":873,"_stem":874,"_extension":18},"/en-us/blog/how-we-added-eslint-into-vue",{"title":859,"description":860,"ogTitle":859,"ogDescription":860,"noIndex":6,"ogImage":861,"ogUrl":862,"ogSiteName":685,"ogType":686,"canonicalUrls":862,"schema":863},"How eslint-plugin-vue improved our code reviews","A few months ago we felt the need to build a style guide for Vue and now are using eslint-vue-plugin, which is saving us time in our code reviews.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680227/Blog/Hero%20Images/code_cover_image.jpg","https://about.gitlab.com/blog/how-we-added-eslint-into-vue","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How eslint-plugin-vue improved our code reviews\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Filipa Lacerda\"}],\n        \"datePublished\": \"2018-02-13\",\n      }",{"title":859,"description":860,"authors":865,"heroImage":861,"date":866,"body":867,"category":802,"tags":868},[736],"2018-02-13","\n\nWe've (finally) integrated [eslint-plugin-vue](https://github.com/vuejs/eslint-plugin-vue) successfully into our codebase!\n\n\u003C!-- more -->\n\nWhen we [added Vue](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/5845) to our codebase back in April 2016, [eslint-plugin-vue](https://github.com/vuejs/eslint-plugin-vue) did not yet [exist](https://github.com/vuejs/eslint-plugin-vue/commit/6a3a6db540e823b51af1e02950896ac9c2b49219) and we had not yet started using [eslint](https://eslint.org/) at all.\n\nOne of the things I love the most about GitLab being an open source tool is that anyone can contribute! 
[Winnie Hellmann](https://gitlab.com/winh), who has since joined the team, did this amazing work [adding eslint](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/5445) as a community contribution. Thanks Winnie! 🙇‍\n\n## The start of a style guide\n\nAs our Vue codebase grew from a few features to quite a large usage ([issue boards](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/5554), [environments](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/8954), [cycle analytics](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/7366), [pipelines](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/10878)) we noticed that each of our Vue apps followed a different style. At that time we felt the need to [document how to architecture a Vue application](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/8866) to ensure a consistent codebase. Once we defined and documented how to use the component system and Flux architecture [with our codebase](https://docs.gitlab.com/ee/development/fe_guide/vue.html#vue-architecture), we noticed that our Vue code also differed in very small things, such as indentation or the order we declared the methods. These inconsistencies, although small, increased the complexity of the review process and for maintaining a healthy codebase.\n\nWith the goal of decreasing the time we spent reviewing Vue code and debating on each of these aspects, and because at the time there wasn't an official Vue style guide, [we started our own](https://gitlab.com/gitlab-org/gitlab-ce/commit/8c3bdc853a5237a3bef6e26fcf22132db7e8bd9c)! You can check out our documentation [here](https://docs.gitlab.com/e e/development/fe_guide/style_guide_js.html#vue-js). As the Vue community grew, the need for an official style guide and for an eslint plugin for Vue grew with it. 
Thanks to the wonderful team [Michał Sajnóg](https://github.com/michalsnik), [Toru Nagashima](https://github.com/mysticatea), [Armano](https://github.com/armano2) and [Chris Fritz](https://github.com/chrisvfritz) leading the development of such a tool, we are now able to use it in production! And we even got to act as source of [inspiration for the official one](https://github.com/vuejs/eslint-plugin-vue/issues/77#issuecomment-315834845) ❤\n\n## Adding eslint-vue-plugin\n\nAfter [waiting a couple of months](https://gitlab.com/gitlab-org/gitlab-ce/issues/34312) for a stable version of [eslint-plugin-vue](https://github.com/vuejs/eslint-plugin-vue), we finally gave it a try once version [4.0.0](https://github.com/vuejs/eslint-plugin-vue/releases/tag/v4.0.0) was released.\n\n![EE Conflicts](https://about.gitlab.com/images/eslint-vue-plugin/eslint-conflicts-team-help.png \"EE Conflicts\"){: .shadow}\n\n*\u003Csmall>Frontend team working together to resolve all the vue eslint problems\u003C/small>*\n\nIt took a couple of days to fix all the problems eslint identified in our code, but we were able to successfully add it and thanks to a huge team effort, the second row of conflicts was solved very quickly. Thanks again Luke, Eric, Kushal and José!\n\nNow our review process is even faster, we don't have to manually check for the style guide rules anymore! 
🎉\n\n[Cover image](https://pixabay.com/en/computer-computer-code-screen-1209641/) by [Free-Photos](https://pixabay.com/en/users/Free-Photos-242387/) is licensed under [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/)\n{: .note}\n",[804,9],{"slug":870,"featured":6,"template":699},"how-we-added-eslint-into-vue","content:en-us:blog:how-we-added-eslint-into-vue.yml","How We Added Eslint Into Vue","en-us/blog/how-we-added-eslint-into-vue.yml","en-us/blog/how-we-added-eslint-into-vue",{"_path":876,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":877,"content":883,"config":889,"_id":891,"_type":13,"title":892,"_source":15,"_file":893,"_stem":894,"_extension":18},"/en-us/blog/introducing-gitlab-s-integrated-development-environment",{"title":878,"description":879,"ogTitle":878,"ogDescription":879,"noIndex":6,"ogImage":880,"ogUrl":881,"ogSiteName":685,"ogType":686,"canonicalUrls":881,"schema":882},"Meet the GitLab Web IDE","Here's how we went from a proof of concept to a new feature that makes it even easier for everyone to edit inside of GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678812/Blog/Hero%20Images/web-ide-cover.jpg","https://about.gitlab.com/blog/introducing-gitlab-s-integrated-development-environment","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet the GitLab Web IDE\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dimitrie Hoekstra\"}],\n        \"datePublished\": \"2018-06-15\",\n      }",{"title":878,"description":879,"authors":884,"heroImage":880,"date":886,"body":887,"category":802,"tags":888},[885],"Dimitrie Hoekstra","2018-06-15","\n\nGitLab has been doing much more for the application development workflow than just source code management and versioning for a while – now spanning everything from [portfolio management](https://docs.gitlab.com/ee/user/group/epics/index.html#epics) to the [entire DevOps 
lifecycle](/blog/from-dev-to-devops/). Having everyone work from and be familiar with the same interface has many advantages.\n\nAll that code that gets automatically tested and deployed to production has a human at its source though. With the speed of innovation in today’s web development, we saw a chance to help out both new as well as seasoned developers with writing, reviewing, and committing that code with more confidence. In [GitLab 10.7](/releases/2018/04/22/gitlab-10-7-released/) we released the first iteration of our Web IDE – here's how it happened.\n\n## From experiment towards product\n\nThe original idea came from staff developer [Jacob Schatz](/company/team/#jakecodes), who observed how non-developers were having a hard time editing multiple files and getting those changes committed.\n\nAlthough having discussed implementing an Integrated Development Environment (IDE) into GitLab with our CEO [Sid](/company/team/#sytses) and VP of Product [Job](/company/team/#Jobvo) before, it was never clear how to do that and what exact problems it would solve.\n\nAt some point, it dawned on us that the repository view might be the right vessel. Jacob set up a proof of concept where he made our file viewer work in the context of a file editor. It removed the page refresh when switching between files and it approached editing from a branch perspective instead of per file. The result was the beginning of the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/), although it was called the \"repo editor\" at that time.\n\n![Proof of concept multi-file editor](https://about.gitlab.com/images/blogimages/webide/multifileeditor.png){: .shadow.medium.center}\n\nSetting up that proof of concept was a [tremendous amount of work](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/12198) and was time-boxed to one month. 
Jacob also had other responsibilities, and there was still a long way to go from concept to minimal viable product (MVP).\n\nProduct, UX, and other developers got involved to see if this could be pushed towards production. The concept solved a problem, but did it align with our vision? How could we holistically integrate this and make it a great experience? How could we get it to perform well for many different users?\n\n## The next phase\n\nIt took some time, but it was clear that we were aiming for a real integrated development experience, accessible for everyone right within the GitLab UI, without anything to install. The idea grew from the \"Repo editor\" into that of the \"Web IDE.\"\n\nGitLab itself is open source (or rather [open core](/blog/gitlab-is-open-core-github-is-closed-source/)) and relies on many open source projects for its development. Jacob had already decided that the [Monaco editor](https://microsoft.github.io/monaco-editor/) was the perfect code editor to integrate. It had already proven itself within different contexts, was great for performance, and so could be considered a [boring solution](https://handbook.gitlab.com/handbook/values/#efficiency).\n\nOur UX manager [Sarrah Vesselov](/company/team/#SVesselov) did the initial design for the concept after which it got passed on to me. It was up to our platform product manager [James Ramsay](/company/team/#jamesramsay), our frontend engineering manager [Tim Zallman](/company/team/#tpmtim), senior frontend engineer [Phil Hughes](/company/team/#iamphill), and I as the UX Designer to redefine the prototype \"multi-file editor\" into the foundation capable of supporting our vision of an Integrated Development Environment with live previews and web terminals, that enables anyone to contribute.\n\n## Iterating on user experience\n\n### An integrated editor\n\nThe original \"multi-file editor\" was about committing multiple changes at once because this was annoying when updating the handbook or docs. 
Often those changes touched multiple files. It was a prototype that made it easier for people to contribute.\n\nThe more we thought about this idea, the greater the possibilities became. One of GitLab's unique advantages is being an integrated product. Building an editor that was integrated with GitLab and made it easier for anyone to contribute is a natural fit. However, the starting point of a prototype in the file list and blob editor wouldn't have been enough to handle this. Decoupling this was the first actionable item.\n\n>One of GitLab's unique advantages is being an integrated product. Building an editor that was integrated with GitLab and made it easier for anyone to contribute is a natural fit.\n\nThis change, which required a lot of discussion and a considerable amount of engineering work by our developers Phil and Tim, was where the project pivoted towards its new direction. The Web IDE got a lot more screen real estate as it no longer had to make room for the project sidebar and other page elements. We decided that the Web IDE would edit one branch at a time only and conceptualized the initial Git flow into the editor. Based on existing UI paradigms and inspired by other code editors like [VSCode](https://code.visualstudio.com/) and [Atom](https://atom.io/), we arrived at the well-known, three-pane layout.\n\n\u003Cdiv class=\"compare-images-2\">\n  \u003Cimg src=\"/images/blogimages/webide/web-ide-iteration-0-concept.png\" class=\"compare-image-top shadow\" alt=\"multi file editor concept\">\n  \u003Cimg src=\"/images/blogimages/webide/web-ide-iteration-1-concept.png\" class=\"compare-image-bottom shadow\" alt=\"web ide file editor concept\">\n\u003C/div>\n\nEven seasoned developers were once beginners, and getting new people accustomed to the Git workflow continues to be notoriously hard to tackle. We decided therefore that the core of the Web IDE experience should be stable before we can venture into more advanced concepts. 
We set out to make the \"editing to committing\" experience as good as possible and to create a foundation on which we can expand.\n\nEven while having [these discussions](https://gitlab.com/gitlab-org/gitlab-ce/issues/44316), development never stood still. We quickly had a working version of the Web IDE that relied on the Monaco editor. Our immediate efforts pushed towards getting that to a functional, viable state.\n\n### A review state\n\nDue to the potency of the Monaco editor, it became clear we had many options to choose from as to what to develop next. A review state was high up on that list, as it should be obvious what you are going to commit. Not only that, it introduced the possibility of being able to have an integrated merge request review experience in the context of the editing experience – something that has not been possible before.\n\nThis introduced the problem of managing states. After much discussion, we decided to go for editor states instead of file-specific states. Both the user perspective as well as the technical implementation benefited from this as it reduced complexity. It meant you were either editing your files or reviewing your changes across the files you had opened.\n\n![Web IDE edit and review states](https://about.gitlab.com/images/blogimages/webide/web-ide-states.png){: .shadow.medium.center}\n\nAt this point, we are nearing the current state of the Web IDE, though in GitLab 10.8 we could finally [realize the \"editing to committing\" experience](https://gitlab.com/gitlab-org/gitlab-ce/issues/44846) that we talked about before and which was conceptualized and [prototyped](https://framer.cloud/Cojmw/index.html) while developing GitLab 10.7. This was made possible as development reached a more stable state.\n\n### Deciding on hierarchy\n\nThe new experience had several objectives. It needed to introduce a more logical hierarchy for the panes to operate in. 
Based on that we could decide which panes would potentially show what information and where we could fit in any future more advanced features.\n\nThe second objective was to guide the user more intuitively from editing to committing. The editing and reviewing experience up until then showed its shortcomings as it was hard to switch modes and unclear when you were doing a good job. If even seasoned developers had a hard time using it, how could people just starting out ever hope to successfully contribute making use of it?\nJames and I went through many concepts and discussed both flow and hierarchy before getting into detailed mockups. Through the iterations, it became apparent we preferred our hierarchy to act from left to right. We decided we needed a similar paradigm as the activity bar shown in VSCode. The editor became far more usable as state changes were just one click away, regardless of which state you were already using. As committing was now a separate state as well, it brought a linearity to the entire flow as seen from the activity bar.\n\nThe last significant detail, which came out of a discarded design iteration, was a button to guide the user towards committing their changes. It introduced a little section at the bottom of each state with a blue commit button and a counter so you can see how many changes you have made – essential as we repurposed the right sidebar.\n\n\u003Cdiv class=\"compare-images-3\">\n  \u003Cimg src=\"/images/blogimages/webide/web-ide-left-1.png\" class=\"compare-image-top shadow\" alt=\"web ide revised concept edit mode\">\n  \u003Cimg src=\"/images/blogimages/webide/web-ide-left-2.png\" class=\"compare-image-middle shadow\" alt=\"web ide revised concept review mode\">\n  \u003Cimg src=\"/images/blogimages/webide/web-ide-left-3.png\" class=\"compare-image-bottom shadow\" alt=\"web ide revised concept commit mode\">\n\u003C/div>\n\n*Interested to see all iterations the concepts have gone through? 
Check out my [Web IDE directory](https://gitlab.com/gitlab-org/gitlab-design/tree/master/progress/dimitrie/web-ide) in GitLab's open source design library where we contribute all our design files!*\n\n## Just the beginning\n\nThe current state of the Web IDE is still only the beginning. We are planning for an even better experience in the future: one where we can integrate and support more advanced features, such as a live environment to test your code against and code review discussions which are directly resolvable.\n\nIn GitLab 11.0, shipping next Friday, we will already have the following improvements: you will be able to view the latest pipeline status and the job logs directly in context, and you will be able to quickly switch between both assigned and authored merge requests without leaving the Web IDE!\n\nThis and more will inevitably lead towards more interesting design decisions to be made. Some of these concepts are uncharted territory and are sure to be valuable to further speed up development and give developers more confidence. Our hope is that this is a valuable contribution to both the open source community as well as GitLab itself.\n\nDo you have great ideas to push this effort forwards or want to contribute yourself? 
Check out the [issue tracker](https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=web%20ide)!\n",[804,9,848],{"slug":890,"featured":6,"template":699},"introducing-gitlab-s-integrated-development-environment","content:en-us:blog:introducing-gitlab-s-integrated-development-environment.yml","Introducing Gitlab S Integrated Development Environment","en-us/blog/introducing-gitlab-s-integrated-development-environment.yml","en-us/blog/introducing-gitlab-s-integrated-development-environment",{"_path":896,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":897,"content":903,"config":910,"_id":912,"_type":13,"title":913,"_source":15,"_file":914,"_stem":915,"_extension":18},"/en-us/blog/iteration-on-error-tracking",{"title":898,"description":899,"ogTitle":898,"ogDescription":899,"noIndex":6,"ogImage":900,"ogUrl":901,"ogSiteName":685,"ogType":686,"canonicalUrls":901,"schema":902},"Why we scoped down to build up error tracking ","We dig into how shipping small iterations is accelerating delivery on our error tracking product.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665440/Blog/Hero%20Images/automate-ce-ee-merges.jpg","https://about.gitlab.com/blog/iteration-on-error-tracking","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we scoped down to build up error tracking \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-01-23\"\n      }",{"title":898,"description":899,"authors":904,"heroImage":900,"date":906,"body":907,"category":802,"tags":908},[905],"Sara Kassabian","2020-01-23","When our vision for [error tracking](https://about.gitlab.com/direction/monitor/platform-insights/error_tracking/) is fully realized, the developers who use GitLab will be able to find and fix errors before their customers ever report them, all while staying in our tool. 
But waiting until our error tracking feature is pristine would just slow us down.\n\nInstead, the engineers and product managers on the [Monitor:Health](https://handbook.gitlab.com/handbook/engineering/development/ops/monitor/respond/) team work **iteratively** by shipping smaller changes as we move closer to achieving our vision for the error tracking feature.\n\n## What does it mean to work iteratively?\n\n\"[Iterating] means scoping down a task to deliver it sooner. So, it means making something smaller so you can get it done quicker,\" says [Sid Sijbrandij](/company/team/#sytses), CEO and co-founder of GitLab.\n\nWe made [iteration](https://handbook.gitlab.com/handbook/values/#iteration) one of our core company values because of the fundamental belief that even a small change is better than no change at all. And while iteration in engineering is already recognized as being effective, our organization aims to make iteration a component of every team’s workflow.\n\nIn the video below, Sid and [Christopher \"Leif\" Lefelhocz](https://about.gitlab.com/company/team/#christopher-l), senior director of development, share how the product and engineering teams worked together to speed up development on error tracking by breaking the engineering process down into small steps and iterating as they go.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/tPTweQlBS54\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWe followed up with the Monitor:Health team to talk about how product and engineering worked together to develop an iterative strategy for making improvements to our error tracking product, both in terms of how our product team built the plan for error tracking and how engineering shipped the [minimum viable change](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) (MVC) to production.\n\n## How we created a 
product strategy for error tracking\n\nError tracking is a process whereby application errors are identified and fixed as quickly as possible. The way error tracking functions at GitLab today is [through integration with Sentry](https://about.gitlab.com/direction/monitor/platform-insights/error_tracking/), which aggregates errors, surfaces them in the GitLab UI, and provides the tools to triage and respond to the critical ones.\n\nToday, our error tracking feature is at the [minimal level of maturity](https://about.gitlab.com/direction/monitor/platform-insights/error_tracking/), meaning we still have plenty of work to do before this feature is viable.\n\n\"The goal was to be able to provide error tracking as a product and bring these processes closer to the development delivery workflow,\" said [Sarah Waldner](/company/team/#sarahwaldner), senior product manager on the Monitor:Health team.\n\nThe product team summarized what needs to be done to move [error tracking at GitLab from minimal to viable](https://gitlab.com/groups/gitlab-org/-/epics/1625) as part of a detailed [parent epic](https://docs.gitlab.com/ee/user/group/epics/#multi-level-child-epics). The parent epic essentially establishes product priorities by defining which use cases error tracking needs to solve in order for the product to be considered a viable feature. The next step was to define the core problems that users encounter with error tracking and double-check the solutions that should be used to solve those problems.\n\n\"Once we came up with these problems and validated those, we moved into a solution validation cycle whereby designers came up with different solutions and flows for these and then we tested them with different users,\" says Sarah. 
\"After we did all of that and have all of our solutions validated we broke it down into four different things that someone needs to do from a high level with Sentry.\"\n\nThose top four actions were divided into child epics which roll-up to the parent epic, and include:\n\n*   [The instrumentation or configuration of Sentry](https://gitlab.com/groups/gitlab-org/-/epics/2036)\n*   [Correlating errors](https://gitlab.com/groups/gitlab-org/-/epics/2035)\n*   [Resolving errors](https://gitlab.com/groups/gitlab-org/-/epics/2034)\n*   [Triaging errors](https://gitlab.com/groups/gitlab-org/-/epics/2029)\n\nBy breaking down the problems and establishing solutions, the team took an important step toward establishing their product development priorities. Contained in each of these child epics are other epics and issues which break down the solutions into the larger aspects.\n\n## Establishing development priorities\n\nThe team recognized that, in order to boost error tracking to viable, there needed to be a better way to resolve errors that are surfaced by Sentry within GitLab. The team created an epic for [resolving errors](https://gitlab.com/groups/gitlab-org/-/epics/2034), and outlined some of the key development priorities.\n\n\"So, to resolve errors, if you have an error that you need to fix, you might want to create an issue to track that work, respond to it, and close that issue in the general workflow,\" says Sarah. 
\"So within the resolving errors workflow part of the error tracking parent epic, we pose the idea of being able to manually open an issue from a Sentry error, which was then broken down further into where you do it from, and further again on the error detail page.\"\n\n![Resolve errors epic](https://about.gitlab.com/images/blogimages/resolve_errors_epic.png){: .shadow.medium.center}\nThe workflow for the resolve errors epic is broken down into multiple child epics, which correlate to different development projects.\n{: .note.text-center}\n\nThe team decided that we needed the ability to [create an issue within GitLab based on the errors detected by Sentry](https://gitlab.com/groups/gitlab-org/-/epics/2210) and that they wanted this function and button to appear on both the error list page as well as on the [error detail page](https://gitlab.com/groups/gitlab-org/-/epics/2210). The team then decided to make the error detail page the first priority.\n\n\"Through conversation, we were able to determine what is the bare minimum of value and broke it down as best as we could from frontend to backend, with the idea that it's better to ship something small that's not fully complete than (to ship) nothing at all,\" says [Clement Ho](/company/team/#ClemMakesApps), frontend engineering manager on Monitor:Health.\n\n## The \"Create an Issue\" button in three iterations\n\n\"Being able to open an issue from the error detail page seems really simple, but once you talk through what that workflow actually looks like, there are a lot more aspects to it than previously thought,\" says Sarah.\n\n![Open issue workflow](https://about.gitlab.com/images/blogimages/open_issue_epic.png){: .shadow.medium.center}\nBreaking the frontend and backend engineering into iterations shows just how much work needs to be done to ship even one minor component of the error tracking product.\n{: .note.text-center}\n\n### The \"Create an Issue\" button in stages\n\nClement was the architect behind the 
`Create an Issue` button frontend iterations. He explained that he wanted to take advantage of GitLab deploying frequently, and so he broke down the development process for the `Create an Issue` button into a series of small steps.\n\nThe [first iteration](https://gitlab.com/gitlab-org/gitlab/issues/36537) was simply to build the ability to create an issue from the error detail page. In this iteration, the `Create an Issue` button was simple and unstyled and clicking it led the user to a blank issue. While not overly helpful at this phase, it represents a good start in allowing someone to respond to an error.\n\n![Create an Issue button](https://about.gitlab.com/images/blogimages/create_an_issue_it1.png){: .shadow.medium.center}\nWhat the `Create an Issue` button will look like when it's done.\n{: .note.text-center}\n\nIn the [second iteration](https://gitlab.com/gitlab-org/gitlab/issues/36540), the user clicks `Create an Issue` and the issue comes pre-filled with the Sentry error title, description, and link. It’s still not styled and consistent with GitLab UI yet, but it’s possible to see more of the error context when creating an issue in response to the error.\n\nIn the [third iteration](https://gitlab.com/gitlab-org/gitlab/issues/36542), the GitLab UI gets cleaned up and the issue comes with proper formatting.\n\n\"Now, we are three issues into this and each one has been done in a couple of days and after the first couple of days, someone was able to create an issue,\" says Sarah. 
\"And that way we got the system much faster instead of first adding the button and then adding the experience of the new issue and then having all of the information in there styled.\"\n\n### Is it better to start with frontend or backend engineering?\n\nAs Christopher noted in his [conversation with Sid](https://www.youtube.com/watch?v=tPTweQlBS54), everything that Clement was working on in the first three iterations was frontend-focused; typically engineers start problem-solving from the backend.\n\n\"I love frontend first. I love interface first also because it helps everyone think about it,\" says [Sid to Christopher regarding this project](https://www.youtube.com/watch?v=tPTweQlBS54). \"If you have something in the interface it's easier to understand for customers, for backend people, etc. So in the end what the customer sees is the product. One way to develop is to start with the readme or start with the press release. After that, the closest thing you can think of is the interface. So I think it's much better to have an interface built and then do the backend than vice versa. Even though I come from backend engineering.\"\n\nJust a few days after Clement started building the frontend of the `Create an Issue` button the backend team started building support in separate issues. The main priority was to build backend support that associates issues to errors so that users are not creating multiple issues for the same error. 
The engineers also built frontend support so the user can see that an issue was already created and linked to a particular error.\n\n## The power of iterative thinking\n\n\"One huge thing that came out of this is all team members now feel empowered to create issues and to just add them to the milestone and if they realize something is too big, they can create followups or second iterations,\" says Sarah.\n\nWhile the end goal is to build a viable error tracking product, the big vision simply cannot be achieved without smaller, incremental steps. While it is clear that the engineering teams embraced iteration, Sarah and the product team also recognized the strong strategic value of iterative product development.\n\nAt the same time, Clement wanted to take advantage of GitLab’s frequent deployments, but he also realized that by breaking down the engineering process into MVCs he could also drive up [merge request rate](https://handbook.gitlab.com/handbook/engineering/development/performance-indicators/#mr-rate) on the Monitor:Health frontend engineering team (the average number of merge requests per engineer merged per month) which is a [KPI](https://handbook.gitlab.com/handbook/engineering/development/performance-indicators/#mr-rate).\n\n![MR rate increases](https://about.gitlab.com/images/blogimages/mrs.png){: .shadow.medium.center}\nThe data shows an increase in the rate of merge requests on the Monitor:Health frontend engineering team.\n{: .note.text-center}\n\nThe data speaks for itself, since breaking down the product development process for error tracking into smaller iterations, the MR rate for Clement’s team has increased. 🎉\n\n## Scoping down to speed things up\n\nClement says that one of his key takeaways from this iterative development process was that GitLab ought to embrace iteration on the engineering side, but also iteration in product development. 
He is encouraging his team to ship MVCs more frequently, and plans to check his work by running through the process a few more times to iron out any wrinkles in the workflow.\n\nWhile the highly iterative approach to error tracking has been lauded by everyone from the senior director of development to our very own CEO, Clement acknowledges that this is still a work-in-progress.\n\n\"I think the cost is communication and information being spread out everywhere,\" Clement says.\n\nHe advises teams looking to adopt this highly iterative approach be extra disciplined at consolidating conversation on specific epics and issues within GitLab, otherwise, communication can get unwieldy, fast.\n\nCover photo by Max Ostrozhinskiy on Unsplash.\n{: .note}\n",[719,9,909,825],"production",{"slug":911,"featured":6,"template":699},"iteration-on-error-tracking","content:en-us:blog:iteration-on-error-tracking.yml","Iteration On Error Tracking","en-us/blog/iteration-on-error-tracking.yml","en-us/blog/iteration-on-error-tracking",{"_path":917,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":918,"content":924,"config":930,"_id":932,"_type":13,"title":933,"_source":15,"_file":934,"_stem":935,"_extension":18},"/en-us/blog/journey-in-native-unicode-emoji",{"title":919,"description":920,"ogTitle":919,"ogDescription":920,"noIndex":6,"ogImage":921,"ogUrl":922,"ogSiteName":685,"ogType":686,"canonicalUrls":922,"schema":923},"Our journey in switching to native Unicode emoji","Unicode is hard. Here's a guide to getting native Unicode Emoji right 👌. 
Learn more!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672573/Blog/Hero%20Images/journey-in-native-unicode-emoji-cover.png","https://about.gitlab.com/blog/journey-in-native-unicode-emoji","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Our journey in switching to native Unicode emoji\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eric Eastwood\"}],\n        \"datePublished\": \"2018-05-30\"\n      }",{"title":919,"description":920,"authors":925,"heroImage":921,"date":927,"body":928,"category":802,"tags":929},[926],"Eric Eastwood","2018-05-30","The switch from image-based emoji to native Unicode wasn't a straightforward\njourney and included many intricacies to get production ready. Support\nvaries widely on each OS, even between the browsers on the OS. We also\nwanted to support falling back to image-based emoji for environments that do\nnot support everything yet, otherwise people would see black squares (□). As\na simple example, most Linux environments do not have Unicode emoji support\nunless you manually install a font. I consider this blog post the survival\nmanual I wish I had had when implementing native Unicode emoji myself.\n\n\n## What is Unicode emoji?\n\n\nUnicode emoji is a universal character encoding standard maintained by the\n[Unicode Consortium](https://home.unicode.org/basic-info/overview/) and it\nprovides the basis for processing, storing, and interchanging text data in\nany language. As far as emojis themselves are concerned, this is the\nencoding system that develops and houses all emojis. Emojis are encoded in\nthe Unicode system based on appearance rather than a specific semantic. 
\n\n\n## Are Unicode emojis compatible with all devices?\n\n\nThe short answer is yes!\n\n\nAll modern software providers have become compatible with Unicode so that\ndata can be transferred freely without corruption, regardless of platform,\nlanguage, or device. \n\n\nBefore Unicode, there were multiple character encoding systems to assign\nnumbers to each of the letters and numbers that were used by computers. But\nthese character encoding systems simply couldn’t keep up with the volume of\nlanguages using different letters and numbers. The data passing through\nthese different encodings ran the risk of being corrupted due to a lack of\nsufficient support from a given computer - particularly servers.\n\n\nAnd so, a new system was born: Unicode.\n\n\n## Why move to native Unicode emoji?\n\n\nWe decided to switch to Unicode emoji because it was in line with our\ndecision to use system fonts and it reduces the number of images loaded on a\npage. You can see the [full discussion in this\nissue](https://gitlab.com/gitlab-org/gitlab-ce/issues/26371). We were also\n[interested in](https://gitlab.com/gitlab-org/gitlab-ce/issues/22474)\n[improving](https://gitlab.com/gitlab-org/gitlab-ce/issues/27250) the award\nemoji menu (emoji reaction selector) performance, so it would open quickly\nwithout an AJAX request and with less janky scrolling.\n\n\nThe first step was to find a way to detect whether a given Unicode emoji is\nsupported. Since new emoji/characters are introduced in new\nversions/releases of Unicode specifications from the [Unicode\nConsortium](http://unicode.org/), we can consider every emoji in that\nversion supported if a single emoji in that version tests positively. There\nare exceptions to assuming support for a whole Unicode version, but we can\nhandle them individually as they come up. Unicode 10 is the current stable\nrelease but [Unicode Consortium](http://unicode.org/) is working on\nfinishing up Unicode 11 and starting on Unicode 12 at the moment. 
The\nUnicode Consortium has [a full table of emoji here with the representation\non various\nplatforms](https://unicode.org/emoji/charts/full-emoji-list.html).\n\n\n## Testing for native emoji Unicode support\n\n\nWe test an emoji from each Unicode version/release and cache that locally\n([`localStorage`](https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage))\nin a support map to look up later whether a given emoji is supported. If the\nemoji isn't supported we fall back to an image or CSS sprite depending on\nthe situation.\n\n\nI couldn't find any existing library or JSON document that mapped a given\nemoji to their respective Unicode version/release, so I created my own\nproject that scrapes [emojipedia](https://emojipedia.org/) and assembles a\nJSON map,\n[`emoji-unicode-version`](https://www.npmjs.com/package/emoji-unicode-version)\non npm.\n\n\nTo test whether a Unicode emoji works, we render it to a `\u003Ccanvas>` and\ninspect the pixels in the exact middle for any color (if it is black, then\nthe test fails). We also have to ensure the emoji renders as a single\ncharacter because some emoji are made up of multiple characters (see [ZWJ\nsequences and skin tone modifier sections\nbelow](#emoji-made-up-of-multiple-characters)).\n\n\nWhen choosing a specific emoji for each version to test, be sure to choose\nsomething with color. As an example, I initially chose ⚽ `:soccer:` in the\nUnicode 5.2 range but since it is a black and white emoji, it always failed\nso I switched to ⛵ `:sailboat:`.\n\n\nWe invalidate the support map whenever your user-agent changes because emoji\nsupport changes when you get a browser or OS update. 
We also add a manual\n`GL_EMOJI_VERSION` for busting the cache when we update the support check\nlogic.\n\n\nYou can check out our implementation here,\n[`app/assets/javascripts/emoji/support/unicode_support_map.js`](https://gitlab.com/gitlab-org/gitlab-ce/blob/ee189fd511e1a2c06f05e0d40e1d0b8875151391/app/assets/javascripts/emoji/support/unicode_support_map.js),\n[`app/assets/javascripts/emoji/support/is_emoji_unicode_supported.js`](https://gitlab.com/gitlab-org/gitlab-ce/blob/ee189fd511e1a2c06f05e0d40e1d0b8875151391/app/assets/javascripts/emoji/support/is_emoji_unicode_supported.js)\n\n\n### Rendering emoji to a canvas in Internet Explorer gotchas\n\n\nWhen rendering emoji to a `\u003Ccanvas>`, IE11 didn't like our full font-stack\nand renders small black and white emoji, which are less than ideal.\n\n\nThe culprit is the `-apple-system` piece 😕\n\n\n```js\n\nctx.font = `${fontSize}px -apple-system, BlinkMacSystemFont, \"Segoe UI\",\nRoboto, Oxygen-Sans, Ubuntu, Cantarell, \"Helvetica Neue\", sans-serif, \"Apple\nColor Emoji\", \"Segoe UI Emoji\", \"Segoe UI Symbol\"`;\n\n```\n\n\nBut if you simply go with the emoji part of the stack, it renders the nice\ncolorful emoji as expected,\n\n\n```js\n\nctx.font = `${fontSize}px \"Apple Color Emoji\", \"Segoe UI Emoji\", \"Segoe UI\nSymbol\"`;\n\n```\n\n\nFull font-stack | Small emoji font-stack\n\n--- | ---\n\n[![](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/ie-canvas-full-font-stack.png)](/images/blogimages/journey-in-native-unicode-emoji/ie-canvas-full-font-stack-large.png)\n|\n[![](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/ie-canvas-short-font-stack.png)](/images/blogimages/journey-in-native-unicode-emoji/ie-canvas-short-font-stack-large.png)\n\n\n### Unicode 1.1 emoji not rendering as colorful, fancy glyphs when using\nfull font-stack\n\n\nWe also switched to using a shorter, emoji-only font-stack in CSS to get\nsome of the Unicode 1.1 emoji to render 
colorfully. Read [more in the\nissue](https://gitlab.com/gitlab-org/gitlab-ce/issues/29557#note_25544684).\n\n\nFull font-stack | Small emoji font-stack\n\n--- | ---\n\n![font-family: -apple-system, BlinkMacSystemFont, \"Segoe UI\", Roboto,\nOxygen-Sans, Ubuntu, Cantarell, \"Helvetica Neue\", sans-serif, \"Apple Color\nEmoji\", \"Segoe UI Emoji\", \"Segoe UI\nSymbol\";](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/unicode-1-1-full-font-stack.png)\n| ![font-family: \"Apple Color Emoji\", \"Segoe UI Emoji\", \"Segoe UI\nSymbol\";](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/unicode-1-1-short-font-stack.png)\n\n\n### Render emoji on Canvas at 16px\n\n\nWe use `16px` font size when rendering to the `\u003Ccanvas>` because mobile\nSafari (iOS 9.3) will always render at 16px regardless of the font size you\nspecify.\n\n\nThe `32px` pixel example below is rendering at the same size as the `16px`\nexample. If it worked correctly, the `32px` would fill up the empty space.\n\n\n32px | 16px\n\n--- | ---\n\n\u003Ca\nhref=\"/images/blogimages/journey-in-native-unicode-emoji/16px-font-size-at-32px-large.png\">\u003Cimg\nsrcset=\"/images/blogimages/journey-in-native-unicode-emoji/16px-font-size-at-32px.png\n2x\">\u003C/a> | \u003Ca\nhref=\"/images/blogimages/journey-in-native-unicode-emoji/16px-font-size-at-16px-large.png\">\u003Cimg\nsrcset=\"/images/blogimages/journey-in-native-unicode-emoji/16px-font-size-at-16px.png\n2x\">\u003C/a>\n\n\n## Unicode emoji bounds and baseline positioning is different across\nplatforms\n\n\nAnother issue we ran into when switching is the inconsistency in how emoji\nvertically align across platforms. The baseline defined in each platform\nfont is different, which makes tweaks to center on one platform throw off\nanother. We didn't find any good solution for perfect vertical centering and\nopted just to leave it for now. 
You can read the [full discussion\nhere](https://gitlab.com/gitlab-org/gitlab-ce/issues/33044#note_34375144).\n\n\nFor an in-depth dive into font metrics (not emoji specific), see [*Deep dive\nCSS: font metrics, line-height and\nvertical-align*](http://iamvdo.me/en/blog/css-font-metrics-line-height-and-vertical-align)\nby Vincent De Oliveira (aka iamvdo).\n\n\n## Emoji fallbacks\n\n\nWe define optional fallbacks for images and CSS sprites directly on the\nelement. In terms of priority, when `data-fallback-css-class` is defined on\nthe emoji element, we opt to use the CSS sprite. We only sprite things like\nthe award emoji menu, which lists every emoji at once and potentially needs\nto fall back on everything for platforms that don't support Unicode emoji\n(like Linux).\n\n\n```html\n\n\u003Cgl-emoji data-fallback-src=\"emoji-xxx.png\"\ndata-fallback-css-class=\"emoji-xxx\">\n  xxx\n\u003C/gl-emoji>\n\n```\n\n\nWe use\n[`document.registerElement()`](https://developer.mozilla.org/en-US/docs/Web/API/Document/registerElement)\nin order to hook whenever a `\u003Cgl-emoji>` is used on the page or created and\ntest whether we need to fall back. We use the deprecated v0 web components\n`document.registerElement()` over the new v1\n[`CustomElementRegistry.define()`](https://developer.mozilla.org/en-US/docs/Web/API/CustomElementRegistry/define)\nbecause that only works with ES2015 class syntax and in our case, Babel is\ntranspiling everything which makes that syntax incompatible for now. It is\nalso necessary to use a [`document.registerElement()`\npolyfill](https://github.com/WebReflection/document-register-element) for\nbrowsers that don't support it like Safari.\n\n\nWhen we fall back to a CSS sprite, we add the necessary `.emoji-icon`\nclasses to the `\u003Cgl-emoji>` tag. 
These extra CSS classes hide the emoji\nUnicode content inside so only the background image is visible.\n\n\n```css\n\n.emoji-icon {\n  /* Hide emoji Unicode */\n  color: transparent;\n  /* Hide emoji Unicode in IE */\n  text-indent: -99em;\n  /* ... */\n}\n\n```\n\n\nYou can check out our [`\u003Cgl-emoji>` implementation\nhere](https://gitlab.com/gitlab-org/gitlab-ce/blob/ee189fd511e1a2c06f05e0d40e1d0b8875151391/app/assets/javascripts/behaviors/gl_emoji.js).\n\n\n## Emoji made up of multiple characters\n\n\nSome emoji are composed of multiple characters, which can be tricky to work\nwith in JavaScript.\n[`Array.from`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/from),\n[`String.prototype.codePointAt()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/codePointAt)\nare all your friends here. There is a great article, [*JavaScript has a\nUnicode problem*](https://mathiasbynens.be/notes/javascript-unicode) by\nMathias Bynens, going into more detail.\n\n\n#### Zero Width Joiner (ZWJ) sequences\n\n\nZero Width Joiner (ZWJ) sequences are composed of multiple emoji characters\njoined by a ZWJ character `\\u{200D}`, `&zwj;`(non-printing character). You\ncan read more about [ZWJ sequences\nhere](http://emojipedia.org/emoji-zwj-sequences/).\n\n\n👨‍👩‍👧‍👦 `:family_mwgb:`\n\n```\n\n[...'👨‍👩‍👧‍👦']\n\n// [\"👨\", \"‍\", \"👩\", \"‍\", \"👧\", \"‍\", \"👦\"]\n\n```\n\n\n#### Skin tone modifier\n\n\nSkin tone modifiers don't need a ZWJ character to combine with another\nemoji. You can read more about the [skin tone modifiers\nhere](http://emojipedia.org/modifiers/).\n\n\n👨🏿 `:man_tone5:`\n\n```\n\n[...'👨🏿']\n\n// [\"👨\", \"🏿\"]\n\n```\n\n\nI opted to test multiple skin tone modifier combos and only if all pass,\nconsider skin tone modifiers supported at least on a basic level. 
There was\nstill an outlier on macOS 10.12 where they don't have 🏇🏿\n`:horse_racing_toneX:` and I added a separate test for it.\n\n\n## Emoji discrepancies\n\n\n### Flag emoji\n\n\nOn Windows, all `:flag_xx:` emoji are rendered as two-letter international\ncharacters instead of a colorful flag like on the Apple ecosystem.\n\n\n![](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/flag-emoji-windows.png)\n\n\nOn Android 6, unknown flags are rendered as two-letter international\ncharacters.\n\n\n\u003Cimg\nsrcset=\"/images/blogimages/journey-in-native-unicode-emoji/flag-emoji-android-6.png\n2x\">\n\n\nOn Android 7, unknown flags are rendered as white flags with blue question\nmarks on them.\n\n\n\u003Cimg\nsrcset=\"/images/blogimages/journey-in-native-unicode-emoji/flag-emoji-android-7.png\n2x\">\n\n\n### Keycap emoji on Windows\n\n\nKeycap (digit) emoji are a bit broken on Windows but appear to be fixed on\nChrome 57+, 3️⃣4️⃣5️⃣\n\n\nBrowser | result\n\n--- | ---\n\nChrome 55.0.2883.87 (64-bit) ❌ |\n![](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/keycap-chrome-55.0.2883.87.png)\n\nChrome 56.0.2924.87 (64-bit) ❌ |\n![](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/keycap-chrome-56.0.2924.87.png)\n\nChromium 57.0.2984.0 (64-bit) ✅ |\n![](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/keycap-chrome-57.0.2984.0.png)\n\nChrome 58.0.2999.4 (Official Build) canary (64-bit) ✅ |\n![](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/keycap-chrome-58.0.2999.4.png)\n\n\n### Skin tone splitting from base emoji when width constrained\n\n\nStarting in Chrome 60+ (maybe 59.1+), the [🤼🏿 `:wrestlers_toneX:` and 🤝🏿\n`:handshake_toneX:` emoji started splitting/breaking into separate\npieces](https://gitlab.com/gitlab-org/gitlab-ce/issues/37654) (base emoji\nand skin tone) when their container is width constrained, causing\noverflow/wrapping.\n\n\nI 
it was closed as a \"WontFix\" because
replication](https://docs.gitlab.com/ee/administration/geo/index.html).
Cookies are sent with each request and could be read on the\nserver.\n\n\nWe have some layers of cache on our Markdown rendering which makes this a\nbit difficult to do as we would need a response for both the `true` and\n`false` emoji support. Or we could post-process every request and update the\nrendered markdown HTML accordingly.\n\n\n### SVG fallbacks\n\n\nUsing the [EmojiOne\nSVG](https://github.com/emojione/emojione/tree/2.2.7/assets/svg) fallbacks\nwould be a nice step above the `.png` images currently. This would save on\nbandwidth and we would get nice, crisp fallback emoji.\n\n\nWe could even take it a step further and extract SVGs from the OS specific\nfonts. For older versions of Windows, we could use the Windows 10 fonts so\nthat everything has the appropriate signature black outline/stroke.\n\n\nThe EmojiOne SVGs fit nicely on macOS, so nothing to really change there.\n\n\n### Improving performance\n\n\nCurrently, we have to bundle a large `digests.json` file into our JavaScript\nbundles to get the necessary asset digest hash information to serve fallback\nimages.\n\n\nFor some quick-wins, we can remove those hashes to reduce the file size and\nserve the JSON payload async. 
There are some [more ideas in this\nissue](https://gitlab.com/gitlab-org/gitlab-ce/issues/39000).\n",[9],{"slug":931,"featured":6,"template":699},"journey-in-native-unicode-emoji","content:en-us:blog:journey-in-native-unicode-emoji.yml","Journey In Native Unicode Emoji","en-us/blog/journey-in-native-unicode-emoji.yml","en-us/blog/journey-in-native-unicode-emoji",{"_path":937,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":938,"content":943,"config":950,"_id":952,"_type":13,"title":953,"_source":15,"_file":954,"_stem":955,"_extension":18},"/en-us/blog/live-from-commit-news",{"title":939,"description":940,"ogTitle":939,"ogDescription":940,"noIndex":6,"ogImage":709,"ogUrl":941,"ogSiteName":685,"ogType":686,"canonicalUrls":941,"schema":942},"At GitLab Commit, our product roadmap, new partners, and a new milestone","Live from GitLab Commit: what’s next for our product strategy, expanded partnerships, and more.","https://about.gitlab.com/blog/live-from-commit-news","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"At GitLab Commit, our product roadmap, new partners, and a new milestone\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-09-17\",\n      }",{"title":939,"description":940,"authors":944,"heroImage":709,"date":759,"body":946,"category":300,"tags":947},[945],"Valerie Silverthorne","\nOur first ever user conference – GitLab Commit in Brooklyn – has only been under way for a few hours and we’ve already made a number of key announcements. 
Not only did we secure an additional [$268 million in Series E funding](/blog/gitlab-series-e-funding/) to power our DevOps journey forward, we’ve also strengthened key partnerships, hit new milestones, and released details about important new features in the product.\n\n## GitLab is for everyone\n\nIn the next few releases, look for GitLab to add advanced integration with the [Amazon Elastic Kubernetes](https://aws.amazon.com/eks/) service (EKS), something our CEO [Sid Sijbrandij](/company/team/#sytses) told the audience during his keynote at Commit. Sid also said the number of customers using GitLab with [Terraform by HashiCorp](/blog/gitlab-hashicorp-terraform-vault-pt-1/) is increasing at an exciting rate. This Ops-focused solution leverages GitLab’s CI/CD automated pipelines to better achieve infrastructure as code, a.k.a. GitOps. Lastly, later this year, look out for GitLab to integrate with HashiCorp’s very popular [Vault Project](https://www.vaultproject.io/docs/internals/security.html) that will protect secrets throughout the pipeline.\n\nMoving forward, Sid stressed that we believe everyone has a seat at the table. \"We will make our vision of a complete DevSecOps a reality for each and every one of you,\" says Sid.\n\nAnd for those who’ve been hoping for auto remediation, it’s coming, says [Mark Pundsack](/company/team/#markpundsack), vice president of product strategy, during his keynote. There is work to be done but the vision is clear: Necessary but repetitive security work will be automated in the near future.\n\nThat’s not the end, however. Mark outlined a future where operations and security teams have their own customized dashboards on GitLab, giving them access to the same information as developers. “A ton of people are involved with the development and delivery of software,” says Mark. 
“That is the ultimate GitLab vision: Where every knowledge worker involved with software development and delivery uses a single application so they are on the same page with the rest of their team members.” Ultimately GitLab will expand to the business side, bringing project managers, designers, legal, and executives into the mix. Mark’s final message: “GitLab is for everyone.”\n\n## GitLab & VMWare\n\n[GitLab and VMWare](https://www.globenewswire.com/news-release/2019/09/17/1916738/0/en/GitLab-to-Enable-Cloud-Native-Transformation-on-VMware-Cloud-Marketplace.html) announced a collaboration making [GitLab now available on the VMWare Cloud marketplace](https://about.gitlab.com/2019-09-17-gitlab-on-vmware-cloud-marketplace/). Development teams will be able to deploy and run [GitLab Enterprise (Core)](/pricing/) on their VMWare environments with just a few clicks. GitLab is packaged and supported by Bitnami which provides curated applications for the VMWare marketplace. GitLab also supports [“Continuous Verification”](https://thenewstack.io/how-continuous-security-can-solve-the-cloud-protection-conundrum/) by integrating with VMWare Secure State, Wavefront by VMWare, and CloudHealth.\n\n## KDE chooses GitLab\n\nKDE, an international technology community creating free and open source software for desktop and portable computing, [chose GitLab](https://www.globenewswire.com/news-release/2019/09/17/1916731/0/en/GitLab-Adopted-by-KDE-to-Foster-Open-Source-Contributions.html) for its developers. The KDE team wants to offer additional infrastructure support and thinks GitLab will help boost development momentum.\n\nThe KDE community is one of the largest free software communities with more than 2,600 contributors. Now they’ll have access to an even wider range of development and code review features with GitLab’s DevOps platform to complement their tools currently in use. 
The KDE community will have additional options for accessible infrastructure for contributors, code review integration with Git, streamlined infrastructure and tooling, and an open communication channel with the upstream GitLab community.\n\n## Forbes 2019 Cloud 100\n\nWe’re pretty excited to mention we’ve been named to the [Forbes 2019 Cloud 100](https://www.forbes.com/sites/mnunez/2019/09/11/a-truck-tracker-a-coder-toolbox-and-a-unicorn-from-down-under-inside-this-years-cloud-100/#6148bcad5653), the definitive ranking of the top 100 private cloud companies in the world, published by Forbes in partnership with Bessemer Venture Partners and Salesforce Ventures. We’re the only cloud-agnostic DevOps platform, and [we came in at number 32](https://about.gitlab.com/2019-09-11-gitlab-named-leader-in-forbes-cloud-100-list/)!\n\nIf you like what you’re hearing out of GitLab Commit Brooklyn, then join us at our next [GitLab Commit in London](/events/commit/#) on October 9.\n",[268,278,948,949,9],"open source","releases",{"slug":951,"featured":6,"template":699},"live-from-commit-news","content:en-us:blog:live-from-commit-news.yml","Live From Commit News","en-us/blog/live-from-commit-news.yml","en-us/blog/live-from-commit-news",{"_path":957,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":958,"content":964,"config":971,"_id":973,"_type":13,"title":974,"_source":15,"_file":975,"_stem":976,"_extension":18},"/en-us/blog/more-performant-and-robust-task-lists-in-gitlab",{"title":959,"description":960,"ogTitle":959,"ogDescription":960,"noIndex":6,"ogImage":961,"ogUrl":962,"ogSiteName":685,"ogType":686,"canonicalUrls":962,"schema":963},"How we delivered more performant and robust task lists in GitLab","How simple checkboxes became a challenging engineering problem – and how we fixed 
it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668319/Blog/Hero%20Images/more-robust-task-lists.jpg","https://about.gitlab.com/blog/more-performant-and-robust-task-lists-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we delivered more performant and robust task lists in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatih Acet\"},{\"@type\":\"Person\",\"name\":\"Brett Walker\"}],\n        \"datePublished\": \"2019-04-05\",\n      }",{"title":959,"description":960,"authors":965,"heroImage":961,"date":968,"body":969,"category":802,"tags":970},[966,967],"Fatih Acet","Brett Walker","2019-04-05","[GitLab task lists](https://docs.gitlab.com/ee/user/markdown#task-lists) are\n\na list of checkboxes that you can include anywhere in GitLab where you can\nhave\n\n[GitLab Flavored Markdown\n(GFM)](https://docs.gitlab.com/ee/user/markdown#gitlab-flavored-markdown-gfm).\n\nThis includes issue descriptions and comments, as well as merge requests and\nepics.\n\nThey can be used for a list of items to consider when building a feature,\ntracking\n\ntasks for new employees to complete when onboarding, or even managing that\nlist\n\nof materials to purchase for your next home renovation. You can use them as\ntodo\n\nlists, and so checking off an item should be quick and satisfying.\n\n\n## More checkboxes, more problems\n\n\nIn the past, task lists with several items, even dozens, worked fairly well.\nCheck\n\nan empty checkbox, and a database record gets updated. The checkbox is then\ndisplayed\n\nas checked. Done.\n\n\nHowever, as the number of items increases, and the consequent\n\nmarkdown becomes more complex and longer, problems begin to appear. 
the browser with the updated checkbox, sent it to the backend, where it was\nsaved\n\nto the database, and then re-rendered so that we could cache the new HTML\nand send\n\nit back to the user.
For example, a task item's HTML might look like this:
Performance dramatically increased.\nHowever,\n\nsince we are not able to get it down to zero, we disabled the checkboxes\nwhile the\n\nrequest was in flight to ensure we weren't getting clicks on other tasks.\n\n\nThe result: a much more satisfying task list.\n\n\n[Brett Walker](https://gitlab.com/digitalmoksha) worked on the backend\nchanges and\n\n[Fatih Acet](https://gitlab.com/fatihacet) worked on the frontend changes in\nthis\n\nimprovement. See more details in [the GitLab\nissue](https://gitlab.com/gitlab-org/gitlab-ce/issues/19745).\n\n\nPhoto by [Glenn\nCarstens-Peters](https://unsplash.com/photos/RLw-UC03Gwc?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non [Unsplash](https://unsplash.com/)\n\n{: .note}\n",[9,804],{"slug":972,"featured":6,"template":699},"more-performant-and-robust-task-lists-in-gitlab","content:en-us:blog:more-performant-and-robust-task-lists-in-gitlab.yml","More Performant And Robust Task Lists In Gitlab","en-us/blog/more-performant-and-robust-task-lists-in-gitlab.yml","en-us/blog/more-performant-and-robust-task-lists-in-gitlab",{"_path":978,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":979,"content":985,"config":991,"_id":993,"_type":13,"title":994,"_source":15,"_file":995,"_stem":996,"_extension":18},"/en-us/blog/moving-to-headless-chrome",{"title":980,"description":981,"ogTitle":980,"ogDescription":981,"noIndex":6,"ogImage":982,"ogUrl":983,"ogSiteName":685,"ogType":686,"canonicalUrls":983,"schema":984},"How GitLab switched to Headless Chrome for testing","A detailed explanation with examples of how GitLab made the switch to headless Chrome.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680270/Blog/Hero%20Images/headless-chrome-cover.jpg","https://about.gitlab.com/blog/moving-to-headless-chrome","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab switched to Headless Chrome for testing\",\n        
\"author\": [{\"@type\":\"Person\",\"name\":\"Mike Greiling\"}],\n        \"datePublished\": \"2017-12-19\",\n      }",{"title":980,"description":981,"authors":986,"heroImage":982,"date":988,"body":989,"category":802,"tags":990},[987],"Mike Greiling","2017-12-19","GitLab recently switched from PhantomJS to headless Chrome for both our\n\nfrontend tests and our RSpec feature tests. In this post we will detail the\n\nreasons we made this transition, the challenges we faced, and the solutions\nwe\n\ndeveloped. We hope this will benefit others making the switch.\n\n\n\u003C!-- more -->\n\n\nWe now have a truly accurate way to test GitLab within a real, modern\nbrowser.\n\nThe switch has improved our ability to write tests and debug them while\nrunning\n\nthem directly in Chrome. Plus the change forced us to confront and clean up\na\n\nnumber of hacks we had been using in our tests.\n\n\n## Switching to headless Chrome from PhantomJS: background\n\n\n[PhantomJS](http://phantomjs.org) has been a part of GitLab's test framework\n\n[for almost five\nyears](https://gitlab.com/gitlab-org/gitlab-ce/commit/ba25b2dc84cc25e66d6fa1450fee39c9bac002c5).\n\nIt has been an immensely useful tool for running browser integration tests\nin a\n\nheadless environment at a time when few options were available. However, it\n\nhad some shortcomings:\n\n\nThe most recent version of PhantomJS (v2.1.1) is compiled with a\nthree-year-old\n\nversion of [QtWebKit](https://trac.webkit.org/wiki/QtWebKit) (a fork of\nWebKit\n\nv538.1 according to the user-agent string). This puts it on par with\nsomething\n\nlike Safari 7 on macOS 10.9. It resembles a real modern browser, but it's\nnot\n\nquite there. 
It has a different JavaScript engine, an older rendering\nengine,\n\nand a host of missing features and quirks.\n\n\nAt this time, GitLab supports [the current and previous major\n\nrelease](https://docs.gitlab.com/ee/install/requirements.html#supported-web-browsers)\nof\n\nFirefox, Chrome, Safari, and Microsoft Edge/IE. This puts PhantomJS and its\n\ncapabilities somewhere near or below our lowest common denominator. Many\nmodern\n\nbrowser features either [do not\nwork](http://phantomjs.org/supported-web-standards.html),\n\nor [require vendor prefixes](http://phantomjs.org/tips-and-tricks.html) and\n\npolyfills that none of our supported browsers require. We could selectively\n\nadd these polyfills, prefixes, and other workarounds just within our test\n\nenvironment, but doing so would increase technical debt, cause confusion,\nand\n\nmake the tests less representative of a true production environment. In most\n\ncases we had opted to simply omit them or hack around them (more on this\n\n[later](#trigger-method)).\n\n\nHere's a screenshot of the way PhantomJS renders a page from GitLab,\nfollowed\n\nby the same page rendered in Google Chrome:\n\n\n![Page Rendered by\nPhantomJS](https://about.gitlab.com/images/blogimages/moving-to-headless-chrome/render-phantomjs.png){:\n.shadow.center}\n\n\n![Page Rendered by Google\nChrome](https://about.gitlab.com/images/blogimages/moving-to-headless-chrome/render-chrome.png){:\n.shadow.center}\n\n\nYou can see in PhantomJS the filter tabs are rendered horizontally, the\nicons\n\nin the sidebar render on their own lines, the global search field is\n\noverflowing off the navbar, etc.\n\n\nWhile it looks ugly, in most cases we could still use this to run functional\n\ntests, so long as elements of the page remain visible and clickable, but\nthis\n\ndisparity with the way GitLab rendered in a real browser did introduce\nseveral\n\nedge cases.\n\n\n## What is headless Chrome\n\n\nIn April of this year, 
[news\nspread](https://news.ycombinator.com/item?id=14101233)\n\nthat Chrome 59 would support a [native, cross-platform headless\n\nmode](https://www.chromestatus.com/features/5678767817097216). It was\n\npreviously possible to simulate a headless Chrome browser in CI/CD [using\n\nvirtual frame buffer](https://gist.github.com/addyosmani/5336747), but this\n\nrequired a lot of memory and extra complexities. A native headless mode is a\n\ngame changer. It is now possible to run integration tests in a headless\n\nenvironment on a real, modern web browser that our users actually use!\n\n\nSoon after this was revealed, Vitaly Slobodin, PhantomJS's chief developer,\n\nannounced that the project [would no longer be\n\nmaintained](https://github.com/ariya/phantomjs/issues/15105#issuecomment-322850178):\n\n\n\u003Cdiv class=\"center\">\n\n\n\u003Cblockquote class=\"twitter-tweet\" data-cards=\"hidden\" data-lang=\"en\">\u003Cp\nlang=\"en\" dir=\"ltr\">This is the end - \u003Ca\nhref=\"https://t.co/GVmimAyRB5\">https://t.co/GVmimAyRB5\u003C/a>\u003Ca\nhref=\"https://twitter.com/hashtag/phantomjs?src=hash&amp;ref_src=twsrc%5Etfw\">#phantomjs\u003C/a>\n2.5 will not be released. 
Sorry, guys!\u003C/p>&mdash; Vitaly Slobodin\n(@Vitalliumm) \u003Ca\nhref=\"https://twitter.com/Vitalliumm/status/852450027318464513?ref_src=twsrc%5Etfw\">April\n13, 2017\u003C/a>\u003C/blockquote>\n\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\"\ncharset=\"utf-8\">\u003C/script>\n\n\n\u003C/div>\n\n\nIt became clear that we would need to make the transition away from\nPhantomJS at\n\nsome point, so we [opened up an\nissue](https://gitlab.com/gitlab-org/gitlab-ce/issues/30876),\n\ndownloaded the Chrome 59 beta, and started looking at options.\n\n\n### Frontend tests (Karma)\n\n\nOur frontend test suite utilizes the [Karma](http://karma-runner.github.io/)\n\ntest runner, and updating this to work with Google Chrome was surprisingly\n\nsimple ([here's the merge\nrequest](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/12036)).\n\nThe\n[karma-chrome-launcher](https://github.com/karma-runner/karma-chrome-launcher)\n\nplugin was very quickly updated to support headless mode starting from\n\n[version\n2.1.0](https://github.com/karma-runner/karma-chrome-launcher/releases/tag/v2.1.0),\n\nand it was essentially a drop-in replacement for the PhantomJS launcher.\nOnce\n\nwe [re-built our CI/CD build\nimages](https://gitlab.com/gitlab-org/gitlab-build-images/merge_requests/41)\n\nto include Google Chrome 59 (and fiddled around with some pesky timeout\n\nsettings), it worked!  We were also able to remove some rather ugly\n\nPhantomJS-specific hacks that Jasmine required to spy on some built-in\nbrowser\n\nfunctions.\n\n\n### Backend feature tests (RSpec + Capybara)\n\n\nOur feature tests use RSpec and\n[Capybara](https://github.com/teamcapybara/capybara)\n\nto perform full end-to-end integration testing of database, backend, and\n\nfrontend interactions. Before switching to headless Chrome, we had used\n\n[Poltergeist](https://github.com/teampoltergeist/poltergeist) which is a\n\nPhantomJS driver for Capybara. 
It would spin up a PhantomJS browser instance\n\nand direct it to browse, fill out forms, and click around on pages to verify\n\nthat everything behaved as it should.\n\n\nSwitching from PhantomJS to Google Chrome required a change in drivers from\n\nPoltergeist to Selenium and\n[ChromeDriver](https://sites.google.com/a/chromium.org/chromedriver/).\n\nSetting this up was pretty straightforward. You can install ChromeDriver on\n\nmacOS with `brew install chromedriver` and the process is similar on any\ngiven\n\npackage manager in Linux. After this we added the `selenium-webdriver` gem\nto\n\nour test dependencies and configured Capybara like so:\n\n\n```ruby\n\nrequire 'selenium-webdriver'\n\n\nCapybara.register_driver :chrome do |app|\n  options = Selenium::WebDriver::Chrome::Options.new(\n    args: %w[headless disable-gpu no-sandbox]\n  )\n  Capybara::Selenium::Driver.new(app, browser: :chrome, options: options)\nend\n\n\nCapybara.javascript_driver = :chrome\n\n```\n\n\nGoogle says the [`disable-gpu` option is necessary for the time\n\nbeing](https://developers.google.com/web/updates/2017/04/headless-chrome#cli)\n\nuntil some bugs are resolved. The `no-sandbox` option also appears to be\n\nnecessary to get Chrome running inside a Docker container for [GitLab's\nCI/CD\n\nenvironment](/topics/ci-cd/). Google provides a [useful guide for working\nwith headless Chrome\n\nand\nSelenium](https://developers.google.com/web/updates/2017/04/headless-chrome).\n\n\nIn our final implementation we changed this to conditionally add the\n`headless`\n\noption unless you have `CHROME_HEADLESS=false` in your environment. This\nmakes\n\nit easy to disable headless mode while debugging or writing tests. 
### What are the differences between Poltergeist and Selenium?
on a link that was obscured behind an open dropdown menu
Now that we were using a\n    more accurate rendering engine that won't break our layouts, many of these\n    instances could be resolved by simply replacing `.trigger('click')` with\n    `.click`, but due to some of the bad practice uses mentioned above, this\n    didn't always work.\n\n    There are of course some ways to hack a `.trigger` replacement. You could\n    simulate a click by focusing on an element and hitting the \"return\" key,\n    or use JavaScript to trigger a click event, but in most cases we decided to\n    take the time and actually correct these poorly implemented tests so that a\n    normal `.click` could again be used. After all, if our tests are meant to\n    simulate a real user interacting with the page, we should limit ourselves\n    to the actions a real user would be expected to use.\n\n    ```ruby\n    # Before\n    find('.obscured-link').trigger('click')\n\n    # After\n\n    # bad\n    find('.obscured-link').send_keys(:return)\n\n    # bad\n    execute_script(\"document.querySelector('.obscured-link').click();\")\n\n    # good\n    # do something to make link accessible, then\n    find('.link').click\n    ```\n\n1.  **`Element.send_keys` only works on focus-able elements**\n\n    We had a few places in our code where we would test out our keyboard\n    shortcuts using something like `find('.boards-list').native.send_keys('i')`.\n    It turns out Chrome will not allow you to `send_keys` to any element that\n    cannot be \"focused\", e.g. links, form elements, the document body, or\n    presumably anything with a tab index.\n\n    In all of the cases where we were doing this, triggering `send_keys` on the\n    body element would work since that's ultimately where our event handler was\n    listening anyway:\n\n    ```ruby\n    # Before\n    find('.some-div').native.send_keys('i')\n\n    # After\n    find('body').native.send_keys('i')\n    ```\n\n1.  
**`Element.send_keys` does not support non-BMP characters (like emoji)**\n\n    In a few tests, we needed to fill out forms with emoji characters. With\n    Poltergeist we would do this like so:\n\n    ```ruby\n    # Before\n    find('#note-body').native.send_keys('@💃username💃')\n    ```\n\n    In Selenium we would get the following error message:\n\n    ```\n    Selenium::WebDriver::Error::UnknownError:\n        unknown error: ChromeDriver only supports characters in the BMP\n    ```\n\n    To work around this, we added [a JavaScript method to our test bundle that\n    would simulate input and fire off the same DOM events](https://gitlab.com/gitlab-org/gitlab-ce/blob/a8b9852837/app/assets/javascripts/test_utils/simulate_input.js)\n    that an actual keyboard input would generate on every keystroke, then\n    wrapped this with a [ruby helper](https://gitlab.com/gitlab-org/gitlab-ce/blob/a8b9852837/spec/support/input_helper.rb)\n    method that could be called like so:\n\n    ```ruby\n    # After\n    include InputHelper\n\n    simulate_input('#note-body', \"@💃username💃\")\n    ```\n\n1.  **Setting cookies is much more complicated**\n\n    It's quite common to want to set some cookies before `visit`ing a page that\n    you intend to test, whether it's to mock a user session, or toggle a\n    setting. With Poltergeist, this process is really simple. You can use\n    `page.driver.set_cookie`, provide a simple key/value pair, and it will just\n    work as expected, setting a cookie with the correct domain and scope.\n\n    Selenium is quite a bit more strict. 
The method is now\n    `page.driver.browser.manage.add_cookie`, and it comes with two caveats:\n\n    - You cannot set cookies until you `visit` a page in the domain you intend\n      to scope your cookies to.\n    - Annoyingly, you cannot alter the `path` parameter (or at least we could\n      never get this to work), so it is best to set cookies at the root path.\n\n    Before you `visit` your page, Chrome's url is technically sitting at\n    something like `about:blank;`. When you attempt to set a cookie there, it\n    will refuse because there is no hostname, and you cannot coerce one by\n    providing a domain as an argument. The [Selenium\n    documentation](http://docs.seleniumhq.org/docs/03_webdriver.jsp#cookies)\n    suggests that you do the following:\n\n    > If you are trying to preset cookies before you start interacting with a\n    > site and your homepage is large / takes a while to load, an alternative is\n    > to find a smaller page on the site (typically the 404 page is small, e.g.\n    > `http://example.com/some404page`).\n\n    ```ruby\n    # Before\n    before do\n      page.driver.set_cookie('name', 'value')\n    end\n\n    # After\n    before do\n      visit '/some-root-path'\n      page.driver.browser.manage.add_cookie(name: 'name', value: 'value')\n    end\n    ```\n\n1.  **Page request/response inspection methods are missing**\n\n    Poltergeist very conveniently implemented methods like `page.status_code`\n    and `page.response_headers` which are also present in Capybara's default\n    `RackTest` driver, making it easy to inspect the raw response from the\n    server, in addition to the way that response is rendered by the browser. 
It\n    also allowed you to inject headers into the requests made to the server,\n    e.g.:\n\n    ```ruby\n    # Before\n    before do\n      page.driver.add_header('Accept', '*/*')\n    end\n\n    it 'returns a 404 page'\n      visit some_path\n\n      expect(page.status_code).to eq(404)\n      expect(page).to have_css('.some-selector')\n    end\n    ```\n\n    Selenium does not implement these methods, and [the authors do not intend\n    to add support for them](https://github.com/seleniumhq/selenium-google-code-issue-archive/issues/141#issuecomment-191404986),\n    so we needed to develop a workaround. Several people have suggested running\n    a proxy alongside ChromeDriver that would intercept all traffic to and from\n    the server, but this seemed to us like overkill. Instead, we opted to\n    create a [lightweight Rack middleware](https://gitlab.com/gitlab-org/gitlab-ce/blob/a8b9852837/lib/gitlab/testing/request_inspector_middleware.rb)\n    and a corresponding [helper class](https://gitlab.com/gitlab-org/gitlab-ce/blob/a8b9852837/spec/support/inspect_requests.rb)\n    that would intercept the traffic for inspection. This is similar to our\n    [RequestBlockerMiddleware](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/lib/gitlab/testing/request_blocker_middleware.rb)\n    that we were already using to intelligently `wait_for_requests` to complete\n    within our tests. It works like this:\n\n    ```ruby\n    # After\n    it 'returns a 404 page'\n      requests = inspect_requests do\n        visit some_path\n      end\n\n      expect(requests.first.status_code).to eq(404)\n      expect(page).to have_css('.some-selector')\n    end\n    ```\n\n    Within the `inspect_requests` block, the Rack middleware will log all\n    requests and responses, and return them as an array for inspection. 
This\n    will include the page being `visit`ed as well as the subsequent XHR and\n    asset requests, but the initial path request will be the first in the array.\n\n    You can also inject headers using the same helper like so:\n\n    ```ruby\n    # After\n    inspect_requests(inject_headers: { 'Accept' => '*/*' }) do\n      visit some_path\n    end\n    ```\n\n    This middleware should be injected early in the stack to ensure any other\n    middleware that might intercept or modify the request/response will be\n    seen by our tests. We include this line in our test environment config:\n\n    ```ruby\n    config.middleware.insert_before('ActionDispatch::Static', 'Gitlab::Testing::RequestInspectorMiddleware')\n    ```\n\n1.  **Browser console output is no longer output to the terminal**\n\n    Poltergeist would automatically output any `console` messages directly into\n    the terminal in real time as tests were run. If you had a bug in the frontend\n    code that caused a test to fail, this feature would make debugging much\n    easier as you could inspect the terminal output of the test for an error\n    message or a stack trace, or inject your own `console.log()` into the\n    JavaScript to see what is going on. With Selenium this is sadly no longer the\n    case.\n\n    You can, however, collect browser logs by configuring Capybara like so:\n\n    ```ruby\n    capabilities = Selenium::WebDriver::Remote::Capabilities.chrome(\n      loggingPrefs: {\n        browser: \"ALL\",\n        client: \"ALL\",\n        driver: \"ALL\",\n        server: \"ALL\"\n      }\n    )\n\n    # ...\n\n    Capybara::Selenium::Driver.new(\n      app,\n      browser: :chrome,\n      desired_capabilities: capabilities,\n      options: options\n    )\n    ```\n\n    This will allow you to access logs with the following, i.e. 
in the event of\n    a test failure:\n\n    ```ruby\n    page.driver.manage.get_log(:browser)\n    ```\n\n    This is far more cumbersome than it was in Poltergeist, but it's the best\n    method we've found so far. Thanks to [Larry Reid's blog post](http://technopragmatica.blogspot.com/2017/10/switching-to-headless-chrome-for-rails_31.html)\n    for the tip!\n\n## Results\n\n\nRegarding performance, we attempted to quantify the change with a\n\nnon-scientific analysis of 10 full-suite RSpec test runs _before_ this\nchange,\n\nand 10 more runs from _after_ this change, factoring out any tests that were\n\nadded or removed between these pipelines. The end result was:\n\n\n**Before:** 5h 18m 52s\n\n**After:** 5h 12m 34s\n\n\nA savings of about six minutes, or roughly 2 percent of the total compute\ntime, is\n\nstatistically insignificant, so I'm not going to claim we improved our test\n\nspeed with this change.\n\n\nWhat we did improve was test accuracy, and we vastly improved the tools at\nour\n\ndisposal to write and debug tests. Now, all of the Capybara screenshots\n\ngenerated when a CI/CD job fails look exactly as they do on your own browser\n\nrather than resembling the broken PhantomJS screenshot above. Inspecting a\n\nfailing test locally can now be done interactively by turning off headless\n\nmode, dropping a `byebug` line into the spec file, and watching the browser\n\nwindow as you type commands into the prompt. 
This technique proved extremely\n\nuseful while working on this project.\n\n\nYou can find all of the changes we made in [the original merge request page\n\non\nGitLab.com](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/12244).\n\n\n## What are some additional uses for headless Chrome?\n\n\nWe have also been utilizing headless Chrome to analyze frontend performance,\nand have found it to be useful in detecting issues.\n\n\nWe'd like to make it easier for other companies to embrace as well, so as\npart of the upcoming 10.3 release of GitLab we are releasing [Browser\nPerformance\nTesting](https://docs.gitlab.com/ee/user/project/merge_requests/browser_performance_testing.html).\nLeveraging [GitLab CI/CD](/solutions/continuous-integration/), headless\nChrome is launched against a set of pages and an overall performance score\nis calculated. Then for each merge request the scores are compared between\nthe source and target branches, making it easier detect performance\nregressions prior to merge.\n\n\n## Acknowledgements\n\n\nI sincerely hope this information will prove useful to anybody else looking\nto\n\nmake the switch from PhantomJS to headless Chrome for their Rails\napplication.\n\n\nThanks to the Google team for their very helpful documentation, thanks to\nthe\n\nmany bloggers out there who shared their own experiences with hacking\nheadless\n\nChrome in the early days of its availability, and special thanks to Vitaly\n\nSlobodin and the rest of the contributors to PhantomJS who provided us with\nan\n\nextremely useful tool that served us for many years. 
🙇‍\n\n\n\u003Cstyle>\n\n\n.center {\n  text-align: center;\n  display: block;\n  margin-right: auto;\n  margin-left: auto;\n}\n\n\ncode, kbd {\n  font-size: 80%;\n}\n\n\n\u003C/style>\n",[804,9,695],{"slug":992,"featured":6,"template":699},"moving-to-headless-chrome","content:en-us:blog:moving-to-headless-chrome.yml","Moving To Headless Chrome","en-us/blog/moving-to-headless-chrome.yml","en-us/blog/moving-to-headless-chrome",{"_path":998,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":999,"content":1005,"config":1015,"_id":1017,"_type":13,"title":1018,"_source":15,"_file":1019,"_stem":1020,"_extension":18},"/en-us/blog/new-typefaces-in-gitlab",{"title":1000,"description":1001,"ogTitle":1000,"ogDescription":1001,"noIndex":6,"ogImage":1002,"ogUrl":1003,"ogSiteName":685,"ogType":686,"canonicalUrls":1003,"schema":1004},"Get to know the new GitLab typefaces","Dive deep into the considerations for changing to GitLab Sans (Inter) and JetBrains Mono, including improved readability.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669926/Blog/Hero%20Images/Cover3.png","https://about.gitlab.com/blog/new-typefaces-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get to know the new GitLab typefaces\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sascha Eggenberger\"},{\"@type\":\"Person\",\"name\":\"Jeremy Elder\"}],\n        \"datePublished\": \"2023-01-17\",\n      }",{"title":1000,"description":1001,"authors":1006,"heroImage":1002,"date":1009,"body":1010,"category":781,"tags":1011},[1007,1008],"Sascha Eggenberger","Jeremy Elder","2023-01-17","\nWe take the choice of typefaces very seriously around here. And, in the spirit of transparency, a [GitLab core value](https://handbook.gitlab.com/handbook/values/#transparency), we like to share our rationale for typeface changes. 
This blog introduces you to the new default typefaces in GitLab – GitLab Sans (Inter) and JetBrains Mono – and explores in detail why we chose them and how they will improve the user experience.\n\n## Introducing GitLab Sans and JetBrains Mono\n\nIn the recent [GitLab rebrand](/blog/devops-is-at-the-center-of-gitlab/), [Inter](https://rsms.me/inter/) was selected as the primary sans-serif typeface and we've adapted it for use in the GitLab user interface (UI) to have more continuity between the brand and product experience. It will be available for users in Release 15.8. Specifically for the UI, we've enabled disambiguation features (increased distinction between some characters) by default. Because of this change, we're including it under the name GitLab Sans in the open source package of GitLab. To complement GitLab Sans with a monospace typeface, we've chosen another open source option: [JetBrains Mono](https://www.jetbrains.com/lp/mono/).\n\nThe GitLab UI has historically relied on system fonts, like San Francisco on macOS and Segoe UI on Microsoft Windows. There are, however, limitations to using these that we'll cover in a moment.\n\n![GitLab Sans (Inter) and JetBrains Mono typefaces](https://about.gitlab.com/images/blogimages/introducing-new-typefaces/gitlab-sans-jetbrainsmono.png){: .center}\nGitLab Sans (Inter) and JetBrains Mono sample\n{: .note.text-center}\n\n## Why the change?\n\nSo we've already mentioned brand continuity as a driving reason for the change, but let's step back a bit. During the rebrand process, Inter was one of many typefaces considered because it was open source and designed for UI. Choosing a font primarily designed for digital output might seem like an odd choice for branding and print application, but the primary extension and experience is the product itself. GitLab is digital-first, and the brand reflects it. 
Inter had all of the qualities and features we knew we could leverage to enhance and realize our vision for the UI.\n\nWe realize there's a lot of subjectivity wrapped up in a change like this. Visual updates are, well, highly visible, but we believe they have to be rooted in objective considerations that lead to adding real value, so here are a few other aspects we evaluated and will cover in greater detail:\n\n- **Less is more** - How can we limit certain choices in ways that enable more meaningful ones?\n- **Consistency** - Can we create more harmony within a single view, streamline the experience across platforms, and reinforce the brand?\n- **Enhance the content** - Can content be more readable, discernable, and generally consumable?\n\n### Less is more\n\nTypography is a crucial part of the GitLab UI, if not _the_ most crucial part. As we continue to refine and beautify the experience, it's apparent that more control over the typography would yield a better experience not only for our users, but also the ones creating the experiences — our internal product, design, and development teams. System fonts have led to everything from false positive bug reports to visual regression errors on both sides. More choice — especially when systems are choosing — doesn't always lead to better experiences.\n\nWith multiple system fonts in play, we choose compromises, not enhancements. For example, asking what alignment works best for _most_ system fonts in a button instead of what alignment works best for _this_ font. Or, what weight should we use when not all system fonts have the same available options instead of what weight creates the right hierarchy for this content. 
With fewer typeface options we have more ability to make meaningful decisions about disambiguation, visual weight, language support, hierarchy, type scales, and so much more.\n\n### Consistency\n\nAn experience has multiple facets: a single view or screen, a flow between multiple views, a transition from reading to editing, or a switch from settings to documentation. Consistency should happen not only within each of these, but also across them. Consistency in a single view means hierarchy, balance, and harmony. In a flow, consistency establishes patterns and understanding. When contexts change, consistency brings familiarity and enhances trust. Typography is an important aspect of all of these.\n\nInconsistencies add up and lead to design, tech, and experience debt. There are known consistency problems with system fonts, for example, in Firefox on macOS, San Francisco has tighter letter spacing than on Chrome or Safari. This leads to different experiences across browsers, and this is just for one system font.\n\n![Comparing system fonts to show varied x-height](https://about.gitlab.com/images/blogimages/introducing-new-typefaces/compare-x-height.png){: .center}\nVaried x-heights of system fonts\n{: .note.text-center}\n\nOptically, system fonts are noticeably different in size. However, the difference is more visible when you compare the length of each due to character width, weight, and kerning (the space between characters). This impacts everything from truncation and component width, to wrapping and legibility.\n\n![Comparing system fonts to show varied width](https://about.gitlab.com/images/blogimages/introducing-new-typefaces/compare-width.png){: .center}\nVaried width of system fonts\n{: .note.text-center}\n\nMenlo has been used as our monospaced typeface. It appears bigger than many sans-serif typefaces when using the same font size. To counter that issue, we had downscaled its size by one pixel to make it appear as the same optical size. 
This added unnecessary bloat to styles and is also not foolproof since sans-serif system fonts also vary.\n\nInter and JetBrains Mono have nearly identical x-height, which allows us to remove all of the downscaling overrides and more generally handle text styles consistently. While both typefaces have specific use cases, they’re almost always present next to each other in the UI, making cohesiveness that much more important.\n\n![GitLab Sans (Inter) and JetBrains Mono x-height comparison](https://about.gitlab.com/images/blogimages/introducing-new-typefaces/gitlab-sans-jetbrainsmono-x-height.png){: .center}\nGitLab Sans and JetBrains Mono with similar x-height\n{: .note.text-center}\n\nBy reducing our typeface options, we're working towards consistency in so many ways we haven't before, everything from brand to product, product to documentation, and browser to browser. Consistency is not the same as uniformity though, and nor should it inhibit preference, but by creating a baseline those things can have room for more thoughtful approaches in the future too.\n\n### Enhance the content\n\nAs mentioned earlier, typography is a crucial part of the UI, and arguably most of the content is in text form. Whether communication or code, status or state, the typeface is the delivery vehicle for the content. GitLab Sans and JetBrains Mono give us better control over readability.\n\nBoth typefaces include variable webfont and contextual features, which means that the font weight and other settings can be finely tuned to enhance visual weight, hierarchy, and contextual alternates. For GitLab Sans, we've enabled the disambiguation feature set to ensure readability is a top priority. Disambiguation is used to avoid common character confusion. For example, by using the feature set [cv05](https://rsms.me/inter/lab/?feat-cv05=1) (lowercase L with tail for disambiguation), you can easily distinguish between the capital “I” and the lowercase “L” (see image below). 
We had discussed using either [ss04](https://rsms.me/inter/lab/?feat-ss04=1) (disambiguation without slash zero) or cv05 and decided to go with the latter for a simple, modern look.\n\n![Inter Typface character disambiguation](https://about.gitlab.com/images/blogimages/introducing-new-typefaces/inter-disambiguation.png){: .center}\nInter disambiguation options from left to right: Default, without slashed zero (ss04), lowercase L with tail (cv05)\n{: .note.text-center}\n\nGitLab uses a condensed UI, meaning more content in less space and typically at smaller sizes. Inter is popular for a reason, more likely dozens, but the most applicable to GitLab is that it’s designed specifically for UI. On the [website](https://rsms.me/inter/) it states, “Inter is a typeface carefully crafted & designed for computer screens.” With a tall x-height, contextual alternates, tabular numbers, and more, Inter enables us to actually make more meaningful typography decisions that impact readability.\n\nSimilarly, JetBrains Mono has a tall x-height, which increases readability at smaller sizes, and it has a normal character width to keep more characters on a single line which limits wrapping. During our exploration, we found that typefaces like Menlo, Fira Code, Source Code, or Noto Sans Mono either have shorter x-heights or wider characters that lead to size or spacing compromises.\n\nWith these typefaces in place we've started a deep dive into our type scales and updating design resources in Figma too. The upcoming work on type scales, in particular, will provide more consistency and refinement.\n\n## Other considerations\n\nGitLab is an [open core](/blog/gitlab-is-open-core-github-is-closed-source/) product, which means the core of our product is open source, so selecting typefaces that are also open source was a crucial part of the decision. \n\nAnytime you opt to distribute your own resources versus using what's already available to the system the question of performance comes up. 
And while it's true that we're increasing the payload by a few kilobytes, we're able to rely on modern CSS and browser handling for delivery and caching. At the same time, we're reducing the CSS by removing styles that have been added to counter aforementioned compromises. This is something we'll continue to evaluate and optimize.\n\nAnd speaking of distribution, we're [packaging the fonts](https://www.npmjs.com/package/@gitlab/fonts) to make it easier for all of our properties to consume. This means we're also able to leverage the same resources in our design tooling.\n\nLastly, we know that changes like this have the benefit (or downside, depending on how you look at it) of exposing other inconsistencies in the UI that need to be addressed. While it seems counterintuitive to release an update that potentially introduces visual regression, we consider it as the dye in the water to let us know what else we have to fix as we continue to work towards a single source of truth for typography styles.\n\n## What's next?\n\nAs the typography changes are being rolled out, we’re working through feedback and addressing any potential regressions. Along with type scale updates, we're going to evaluate headings throughout the product to ensure heading levels align with correct Document Object Model (DOM) structure, visual weight, and styles. In short, our typography decisions are interdependent and foundational for the overall experience. By limiting typeface options, we’re removing the limits of how hard we can make typography work so that we can further refine the interface, bring harmony to the UI, and make content more consumable so that using GitLab is more productive and enjoyable. 
\n\nIf you’d like to provide feedback or contribute, please use this [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/386205).\n",[1012,9,948,1013,1014,848],"design","product","UI",{"slug":1016,"featured":6,"template":699},"new-typefaces-in-gitlab","content:en-us:blog:new-typefaces-in-gitlab.yml","New Typefaces In Gitlab","en-us/blog/new-typefaces-in-gitlab.yml","en-us/blog/new-typefaces-in-gitlab",{"_path":1022,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1023,"content":1029,"config":1038,"_id":1040,"_type":13,"title":1041,"_source":15,"_file":1042,"_stem":1043,"_extension":18},"/en-us/blog/open-source-nasa-gl",{"title":1024,"description":1025,"ogTitle":1024,"ogDescription":1025,"noIndex":6,"ogImage":1026,"ogUrl":1027,"ogSiteName":685,"ogType":686,"canonicalUrls":1027,"schema":1028},"MRI Technologies used GitLab for unified toolchains to NASA","Live from GitLab Commit: NASA will be flying Kubernetes clusters to the moon and GitLab is helping.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678434/Blog/Hero%20Images/nasagitlab.jpg","https://about.gitlab.com/blog/open-source-nasa-gl","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Commit: How MRI Technologies used GitLab to bring unified toolchains to NASA\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-09-17\",\n      }",{"title":1030,"description":1025,"authors":1031,"heroImage":1026,"date":759,"body":1032,"category":1033,"tags":1034},"GitLab Commit: How MRI Technologies used GitLab to bring unified toolchains to NASA",[945],"\nNASA can put [Rovers on Mars](https://mars.nasa.gov/mer/), but a complex legacy software system proved a bit of a challenge. 
Speaking at GitLab Commit in Brooklyn, [Marshall Cottrell](https://www.linkedin.com/in/marshall-cottrell-27b385181) of [MRI Technologies](https://www.mricompany.com) explained how the company teamed up with NASA to launch the space agency into the era of modern application development using Kubernetes and GitLab.\n\nIn September 2018 MRI began work on a new software development platform called APPDAT. \"It's the only platform taking a totally 'fresh approach' to application development and data science activities within the Agency,\" Marshall said. The team's challenge was to update an Oracle-based legacy SCM solution using open source technologies and APIs. At the time NASA had no toolchains to support CI/CD during development and lots of silos of information. \"There was no mechanism for us to disseminate innovations, best practices, or what we learned,\" Marshall said. NASA needed a unified toolchain and platform for software delivery. \"GitLab was chosen as the platform source control management solution because it is the only product in this space that integrates all stages of the DevSecOps lifecycle.\"\n\n## A laser focus helps\n\nPerhaps not surprisingly MRI had ambitious goals for APPDAT, Marshall explained. The overarching hope was to build an automated DevOps platform that served as the single source of truth. Until MRI got involved, NASA had no way to actually \"own\" the software development process; teams operated in a piecemeal fashion, choosing contractors and solutions based on situational needs rather than looking at the big picture. 
Those decisions left NASA subject to potentially \"abusive behavior,\" Marshall explained.\n\nSo MRI laid out a number of goals:\n\n- Empower teams to fully manage the resources they support\n- Demonstrate and promote fully open project management and collaboration\n- Create a sandbox for protoyping with no barriers to entry\n- Assemble an API and data economy that would eliminate silos and promote reusability\n- Establish platform-level security controls with a goal of \"compliant by fault\"\n\nTo get there, MRI emphasized collaboration and tried to reach out to the \"forward-leaning\" customers and individual civil servant developers, engineers and researchers who were eager to contribute. The team adhered strictly to cloud native, Zero Trust and open source approaches and, in the end, came up with a Kubernetes platform that met the space agency's needs for today and in the future. The technology choices were important, but so was the time spent laying the groundwork for a culture change. \"Many modernization proposals try to meet everyone where they're at,\" Marshall explained. \"A more opinionated approach allows us to provide a succinct and unified toolchain that all parties can contribute to, evolve, and improve over time.\"\n\nToday the 61-year old space agency has a modern platform where developers can easily collaborate with non-developers, no complex tooling is required, and context switching is a thing of the past, Marshall said. APPDAT syncs from the agency's existing SCM solutions so everyone was able to continue to use the same tools.\n\nPerhaps most exciting, NASA's plans to have astronauts established on the moon by 2024 as part of the [Artemis program](https://www.nasa.gov/what-is-artemis). That will include a data center, and Marshall is confident Kubernetes will be part of the launch.\n\n\"We’ve already begun to change minds at NASA and you can do it at your enterprise too,\" Marshall said. 
His last best advice: Play the long game, only innovate when it makes things easier, and a bottom-up approach is an easy way to make friends.\n\nWatch Marshall's entire presentation here:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/RsUw4Ueyn-c\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nDon't miss out on the chance to network with others on the same DevOps journey. Get your tickets to [Commit London on October 9](/events/commit/).\n\nCover image by [David Torres](https://unsplash.com/@djjabbua) on [Unsplash](https://unsplash.com/)\n{: .note}\n","open-source",[1035,1036,1037,948,9],"GKE","kubernetes","user stories",{"slug":1039,"featured":6,"template":699},"open-source-nasa-gl","content:en-us:blog:open-source-nasa-gl.yml","Open Source Nasa Gl","en-us/blog/open-source-nasa-gl.yml","en-us/blog/open-source-nasa-gl",{"_path":1045,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1046,"content":1052,"config":1060,"_id":1062,"_type":13,"title":1063,"_source":15,"_file":1064,"_stem":1065,"_extension":18},"/en-us/blog/placebo-lines-on-the-pipeline-graph",{"title":1047,"description":1048,"ogTitle":1047,"ogDescription":1048,"noIndex":6,"ogImage":1049,"ogUrl":1050,"ogSiteName":685,"ogType":686,"canonicalUrls":1050,"schema":1051},"Placebo Lines on the Pipeline Graph","Have you noticed the connecting lines missing on your pipelines lately? 
Here's why","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679507/Blog/Hero%20Images/ci-cd.png","https://about.gitlab.com/blog/placebo-lines-on-the-pipeline-graph","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Placebo Lines on the Pipeline Graph\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sam Beckham\"}],\n        \"datePublished\": \"2021-05-11\",\n      }",{"title":1047,"description":1048,"authors":1053,"heroImage":1049,"date":1055,"body":1056,"category":1057,"tags":1058},[1054],"Sam Beckham","2021-05-11","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nHave you ever pressed the close door button on the elevator, in the hope that you'll save a few precious seconds?\nOr got frustrated at the person stood next to you at the cross-walk, neglecting to press the button?\nWell, maybe they know something you don't, or perhaps you know this already.\nMany buttons in our society lie to us.\n[David McRaney](https://youarenotsosmart.com/2010/02/10/placebo-buttons/) dubbed these, \"Placebo buttons\" and they're everywhere.\nThose elevator doors won't close any faster and the cross-walk button has no effect on the lights.\nThe only lights they control are the lights on the buttons themselves.\nThey give you the feedback you crave, but that's all they're doing.\n\nThese placebos aren't constrained to the physical world, they're prevalent in [UI design](/blog/the-evolution-of-ux-at-gitlab/) too.\nFrom literal placebo buttons like [YouTube's downvote](https://www.quora.com/Does-downvoting-a-comment-on-YouTube-even-do-anything), to more subtle effects like Instagram always [pretending to work](https://www.fastcompany.com/1669788/the-3-white-lies-behind-instagrams-lightning-speed), or progress bars that have a [fixed animation](https://www.theatlantic.com/technology/archive/2017/02/why-some-apps-use-fake-progress-bars/517233/).\nThey're everywhere if you know 
where to look.\n\nAt GitLab, we created a placebo of our own in one of our core features; the pipeline graph.\n\nThose of you who have used our pipeline graph, will be familiar with its appearance.\nThere's a series of jobs, grouped by stages, connected by a series of lines depicting the relationships between the jobs.\nBut these lines might be lying to you.\nThese lines are indiscriminately drawn between each job in a stage, regardless of their relationship.\nThese lines are placebos.\n\n![The old pipeline rendering with lines connecting every job in a stage](https://about.gitlab.com/images/blogimages/placebo-lines_old-graph.png)\n\nThis wasn't a problem to begin with.\nA basic pipeline has several jobs across a handful of stages.\nJobs in each stage would run parallel to each other, but each stage would run sequentially.\nIn the image shown above, all the jobs in the test stage would trigger at the same time. Once those jobs had finished, all the jobs in the build stage would trigger.\nWe used rudimentary CSS to draw lines connecting each job in one stage to each job in the next.\nThese lines weren't calculated based on their connections, but still reflected the story they were telling.\n\nSince the introduction of `needs` relationships in [v12.2](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/47063), pipelines got a bit more complicated.\nNow you could configure a job in a later stage to trigger as soon as a job in an earlier stage completed.\nLooking at our old example, we could set the API deployment to run as soon as our spec tests passed.\nThis skips the remaining tests and the entire build stage, turning our lines into pretty little liars.\n\nWe had many internal discussions about these lines, and how to show the relationships between jobs.\nThere's the [`needs` visualization](https://docs.gitlab.com/ee/ci/directed_acyclic_graph/#needs-visualization), which does an excellent job of displaying these relationships, but the main pipeline graph was still 
inaccurate.\nFor the past few months, we've been [refactoring the pipeline graph](https://gitlab.com/gitlab-org/gitlab/-/issues/276949), giving it a new lease of life and fixing some of its issues along the way.\nOne of those issues was the faked lines.\nIn the new version, we can accurately draw lines between jobs.\nLines that actually depict the relationships jobs have with each other.\nNow the lines no-longer lie!\n\n![The newer pipeline graph showing the correct needs links between jobs](https://about.gitlab.com/images/blogimages/placebo-lines_new-graph.png)\n\nThe above image shows an unreleased version of the pipeline graph.\nYou can see the lines drawn between the jobs to show that the `deploy:API` job can start as soon as the `rspec` job is successful.\nSomething the old lines (shown earlier in this post) would have been unable to depict.\n\nOne unfortunate downside of this is that these lines can be quite expensive to calculate.\nThey're actual DOM nodes, drawn deliberately and placed precisely.\nOn smaller graphs this isn't a problem, but some of our initial tests have found pipelines with a potential 8000+ job connections.\nThat kind of calculation would grind the browser to a halt, and nobody wants that.\n\nAt GitLab, we believe in boring solutions.\nWe make the simple change that sets us on the path towards where we want to be.\nShip it, get feedback, and iterate.\nSo that's what we did.\nIn the first phase of this rollout, we shipped the new pipeline graph with no lines connecting the jobs.\nWe don't have to worry about the expensive calculations, and we still get to roll out the refactored pipeline graph.\n\n![The current (v13.11) pipeline graph showing no links between jobs](https://about.gitlab.com/images/blogimages/placebo-lines_current-graph.png)\n\nWe know some of you will miss them, but fear not.\nBoring solutions are just technical debt if you don't iterate on them.\nSo the [improved lines are 
coming](https://gitlab.com/groups/gitlab-org/-/epics/4509) in a future release, along with several other improvements to the pipeline graph.\nWe're already starting to roll out the new [Job Dependencies](https://gitlab.com/gitlab-org/gitlab/-/issues/298973) view which shows the jobs in a (much closer to) execution order.\nStay tuned for more updates, and watch [Sarah Groff Hennigh Palermo's talk](https://www.youtube.com/watch?v=R2EKqKjB7OQ) for the technical side of this effort and a deeper dive into some of the decisions we made.\n","unfiltered",[826,9,1059,1012],"agile",{"slug":1061,"featured":6,"template":699},"placebo-lines-on-the-pipeline-graph","content:en-us:blog:placebo-lines-on-the-pipeline-graph.yml","Placebo Lines On The Pipeline Graph","en-us/blog/placebo-lines-on-the-pipeline-graph.yml","en-us/blog/placebo-lines-on-the-pipeline-graph",{"_path":1067,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1068,"content":1074,"config":1082,"_id":1084,"_type":13,"title":1085,"_source":15,"_file":1086,"_stem":1087,"_extension":18},"/en-us/blog/play-reviewer-roulette",{"title":1069,"description":1070,"ogTitle":1069,"ogDescription":1070,"noIndex":6,"ogImage":1071,"ogUrl":1072,"ogSiteName":685,"ogType":686,"canonicalUrls":1072,"schema":1073},"Reviewer roulette: Easy way to find merge request reviewers","Finding the right reviewer for a merge request can be tough. Reviewer Roulette makes the decision easier – by making it random!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672195/Blog/Hero%20Images/play-reviewer-roulette.jpg","https://about.gitlab.com/blog/play-reviewer-roulette","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Let's play Reviewer Roulette! 
An easy way to find a reviewer for your merge request\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dennis Tang\"}],\n        \"datePublished\": \"2018-06-28\",\n      }",{"title":1075,"description":1070,"authors":1076,"heroImage":1071,"date":1078,"body":1079,"category":802,"tags":1080},"Let's play Reviewer Roulette! An easy way to find a reviewer for your merge request",[1077],"Dennis Tang","2018-06-28","\n\nGitLab is [growing quickly], and [constantly looking for more talented people] to join the team. While exciting, it can be tough to keep track of who's who, especially when you're new to the company.\n\nSo how do you know who to contact if you need a pair of eyes on your merge request?\n\n## Meet Reviewer Roulette!\n\nReviewer Roulette is a Slack slash command to help GitLab team-members randomly select a person from a given team, which can be especially useful as multiple teams work together to deliver features in a single merge request.\n\n![Demo of /reviewerroulette](https://about.gitlab.com/images/blogimages/play-reviewer-roulette/demo.gif){: .shadow.center.medium}\n\n---\n\n## The idea\n\nIt's quite common to find that your issue or merge request will have multiple labels to associate different feature areas and teams that are contributing to them. As someone who's recently joined GitLab, I'm still getting to know [all the different teams and people] that work at GitLab. That said, I'm working on a feature with the [CI/CD](/topics/ci-cd/) or discussion team, who should I reach out to if I have questions or need a review of my work?\n\n![Various labels on Merge Requests in gitlab-ce](https://about.gitlab.com/images/blogimages/play-reviewer-roulette/labels.png){: .shadow.center.medium}\n\nThe idea arose from the [frontend team weekly call] where [Tim Zallmann] reminded us that, \"Everyone on the frontend team is a reviewer.\" The team previously had a microservice built by [Luke Bennett] for this, however, it's no longer online. 
Beyond that, wouldn't it be convenient to simply type a command in Slack to be suggested someone to ping for a review?\n\nI can say with confidence that GitLab is a company that truly exemplifies its values, and I was empowered by the value of [collaboration] to build something that could help our team (and others!) find reviewers. I couldn't be the only one who had this problem!\n\n> **Do it yourself** Our collaboration value is about helping each other when we have questions, need critique, or need help. No need to brainstorm, wait for consensus, or do with two what you can do yourself.\n\nI quickly went to work to (hastily) put together a proof-of-concept to see if it would be something that people would want to use.\n\n## Decision fatigue, be gone!\n\n![Screenshot of /reviewerroulette](https://about.gitlab.com/images/blogimages/play-reviewer-roulette/screenshot.png){: .shadow.right.small.wrap-text}\n\nIt was presented to the frontend team and received warmly, and many people were keen to contribute and also [suggest ideas] that would make it even more useful!\n\nAlthough it was originally intended for the frontend group, since I was building it from scratch, it was very easy to make the decision to have it work for all engineering teams.\n\nWith Reviewer Roulette, I don't have to ping entire Slack channels or guess from our team page to try to find _someone_ to talk to.\n\nAdditionally, it provides a number of other benefits such as:\n\n1.  It promotes a more balanced distribution of reviewers amongst the team.\n    * Less experienced reviewers have more opportunities to do code reviews\n    * More experienced reviewers are not as heavily relied on\n1.  It allows more team members to learn more about parts of the codebase they may not be as familiar with, increasing the knowledge of the team overall\n1.  It provides more opportunities to apply our [code review guidelines] or [frontend style guides] to all team members\n1.  
It reduces bias towards reviewers that you may unconsciously prefer to select\n\nOf course, we have our various subject matter experts such as our [frontend domain experts] and [gitlab-ce maintainers] who may provide the best insight for a given topic, but it's good to randomly select reviewers by default!\n\n## How it's made\n\nWhen it came to thinking about how to build Reviewer Roulette, it wasn't so much about the tech, than it being about being enabled to create something that will benefit the team.\n\nEmbracing our value of [efficiency], the solution is very much a boring one. It's a simple Node.js application utilizing `js-yaml` and `express` to be able to search our [team structure file] and respond to Slack's slash command requests properly.\n\n## What's next\n\nReviewer Roulette is seeing regular usage, and has [plenty of features planned] to hopefully increase its usefulness.\n\nWhile originally intended for engineering, it can [help the entire company] out. In addition to our [Coffee Break calls], we also have [a step in our onboarding process] to meet five different people across different teams and countries. That's something that Reviewer Roulette could easily help with!\n\nWe also plan on moving it to the frontend [GKE] cluster, and activating [Auto DevOps] to make builds and deployments painless.\n\nIf you're interested in checking it out, feel free to take a look at the [project]! Perhaps it might be useful to you and your team?\n\n## Share your thoughts!\n\nIf there's interest in using Reviewer Roulette for your community contribution to GitLab projects, let us know in the comments and we can release it on Slack for everyone to use!\n\nWhat do you think of Reviewer Roulette? Is this something you would use for your team? 
How do you pick people for reviewing?\n\n[Photo](https://unsplash.com/photos/w6OniVDCfn0?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Krissia Cruz on [Unsplash](https://unsplash.com/search/photos/roulette?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n[growing quickly]: /company/okrs/#ceo-great-team-active-recruiting-for-all-vacancies-number-of-diverse-per-vacancy-real-time-dashboard\n[constantly looking for more talented people]: /jobs/\n[all the different teams and people]: /company/team/\n[frontend domain experts]: /handbook/engineering/frontend/#frontend-domain-experts\n[gitlab-ce maintainers]: /handbook/engineering/projects/#gitlab-ce\n[frontend team weekly call]: /handbook/engineering/frontend/#frontend-group-calls\n[Tim Zallmann]: /company/team/#tpmtim\n[Luke Bennett]: /company/team/#__lukebennett\n[suggest ideas]: https://gitlab.com/dennis/reviewer-roulette/issues/\n[plenty of features planned]: https://gitlab.com/dennis/reviewer-roulette/issues/\n[efficiency]: https://handbook.gitlab.com/handbook/values/#efficiency\n[team structure file]: https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/team.yml\n[auto devops]: https://docs.gitlab.com/ee/topics/autodevops/\n[coffee break calls]: /company/culture/all-remote/tips/#coffee-chats\n[a step in our onboarding process]: https://gitlab.com/gitlab-com/people-group/employment-templates/-/blob/main/.gitlab/issue_templates/onboarding.md#day-4-morning-social\n[help the entire company]: https://gitlab.com/dennis/reviewer-roulette/issues/12\n[gke]: /partners/technology-partners/google-cloud-platform/\n[project]: https://gitlab.com/dennis/reviewer-roulette/\n[collaboration]: https://handbook.gitlab.com/handbook/values/#collaboration\n[code review guidelines]: https://docs.gitlab.com/ee/development/code_review.html\n[Frontend style guides]: 
https://docs.gitlab.com/ee/development/fe_guide/index.html#style-guides\n",[804,1081,9],"collaboration",{"slug":1083,"featured":6,"template":699},"play-reviewer-roulette","content:en-us:blog:play-reviewer-roulette.yml","Play Reviewer Roulette","en-us/blog/play-reviewer-roulette.yml","en-us/blog/play-reviewer-roulette",{"_path":1089,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1090,"content":1096,"config":1102,"_id":1104,"_type":13,"title":1105,"_source":15,"_file":1106,"_stem":1107,"_extension":18},"/en-us/blog/the-trouble-with-technical-interviews",{"title":1091,"description":1092,"ogTitle":1091,"ogDescription":1092,"noIndex":6,"ogImage":1093,"ogUrl":1094,"ogSiteName":685,"ogType":686,"canonicalUrls":1094,"schema":1095},"The main problem with technical interviews","Forget the coding exercise. Here's how to create realistic scenarios for engineering candidates in technical interviews.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681148/Blog/Hero%20Images/nycbrooklyn.jpg","https://about.gitlab.com/blog/the-trouble-with-technical-interviews","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The trouble with technical interviews? They aren't like the job you're interviewing for\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-03-19\",\n      }",{"title":1097,"description":1092,"authors":1098,"heroImage":1093,"date":1099,"body":1100,"category":802,"tags":1101},"The trouble with technical interviews? They aren't like the job you're interviewing for",[905],"2020-03-19","\n\nInterviewing for an engineering job in the tech world can mean [you’ll be asked all sorts of questions](https://stackify.com/devops-interview-questions/). 
Sometimes, the job interview questions can be pretty straightforward: “Tell me about a time that you have implemented an effective monitoring solution for a production system.” Other times, the questions are impossible to answer and designed to spark your creativity: “How many windows are in New York City?” After passing the initial interview, the applicant or candidate graduates to the next tier of interviewing: The often-dreaded technical interview.\n\n## What is a technical interview?\n\nA technical interview is one that is conducted to gauge a candidate’s skill level for positions in the information technology, engineering, and science fields. It may also determine how much a candidate knows in more niche areas of a company, such as marketing, sales, and HR.\n\n## How to prepare for a technical interview\n\nProspective engineers often face a challenge when it comes to preparing for the technical interview, largely because there is no playbook for how companies set them up. It’s unclear whether to prepare by memorizing many different topics, or focusing on specific projects. Is it better to practice with a computer or a peer engineer? There are an overwhelming number of resources available online, but with little clarity as to what the standard is for a technical interview and little guidance from the company on what to expect, most of the time engineers start technical interviews in the dark.\n\nInconsistencies in the technical interview process aren’t just a job candidate problem. In fact, many companies struggle to set up a technical interview process that is effective, equitable, and allows the hiring manager to compare candidates. The problem with technical interviewing compounds when a company is experiencing rapid growth.\n\n## What are the challenges of conducting technical interviews at a growing company\n\n\"Imagine you had a hiring target of doubling your team size and all your interviews are conducted remotely. 
Welcome to GitLab,\" says Clement Ho, [frontend engineering manager on the Monitor: Health team](/company/team/#ClemMakesApps) at GitLab.\n\n![Hiring chart shows GitLab more than doubled the number of hires from around 400 in 2019 to roughly 1300 by end of 2020](https://about.gitlab.com/images/blogimages/fei_hiringchart.jpg){: .shadow.medium.center}\n\nGitLab more than doubled the number of hires from around 400 in 2019 to roughly 1300 by end of 2020.\n{: .note.text-center}\n\nWe identifed three core challenges with orchestrating technical interviews as GitLab grows.\n\n1. We didn't have enough interviewers for the pipeline of candidates.\n2. Our technical interviewing process was inconsistent and even a little biased.\n3. It was difficult to measure whether or not we were raising the bar.\n\n\"And by raising the bar, I mean making sure each candidate that joins the team makes the team better,\" says Clement.\n\nThese problems are by no means unique to GitLab. Any engineering company that is scaling rapidly will encounter some growing pains when it comes to hiring, and many will end up falling back on some of the typical models for conducting technical interviews.\n\n## The typical technical interview methods\n\nDuring his talk, [\"Using GitLab to Power our Frontend Technical Interviews\" at GitLab Commit San Francisco](https://www.youtube.com/watch?v=jSbCt8b_4ug), Clement explained the four different techniques that are often employed in technical interviews. Each method comes with advantages and disadvantages from the perspective of the hiring manager.\n\n## What are good technical interview questions\n\nA good technical interview needs to be about more than practical skills – it’s about the whole package.A candidate should possess the ideal coding skills but also be a team and culture fit and be able to discuss developer topics efficiently. 
A technical interview should include both situational interview questions and a skills assessment to discern a candidate’s potential.\n\nThe types of questions to ask can concern a candidate’s technical abilities and background, their career journey so far, and queries specific to the team or company.\n\n## Types of questions asked during a technical interview and their purpose\nEven though employers have already reviewed your resume and cover letter, they will want you to flesh that out during the interview to learn more about how you attained those skills. In order to assess your level of experience, they will likely also ask you to provide concrete examples from prior jobs.\nMake sure you are prepared—do your research on the company and the type of questions you may be potentially asked. This will help build your confidence level and reduce any nervousness you might feel. It’s also an opportunity for you to set yourself apart from other candidates by showcasing your knowledge and additional skills you can bring to the job.\nIt is important to be honest about your skill set because that is something employers value. 
You may find the company will be willing to hire someone who is transparent about the areas where they need to improve and where they’d like to gain more skills.\n\nExamples of common questions to expect in a technical interview:\n\n- What coding languages are you most familiar with?\n- What is your experience with Kubernetes with a specific example?\n- What’s the purpose of continuous integration in an automated build?\n- How have your previous technical roles prepared you for this job?\n- Tell me about a time when you received an unexpected assignment: how did you react, and what did the experience teach you?\n- Please provide more details about your educational background and how it prepared you for this position.\n- How did you go about teaching yourself a necessary technical skill while you were working on a project?\n- What are your strengths, and where do you think you need to improve your skills?\n- Do you have any technical certifications?\n- Please detail the work you did on the project you are most proud of.\n- What are your favorite and least favorite tech tools, and why?\n- What are the pros/cons of working in an agile environment?\n\n### Sample technical interview questions and answers\n- **How do you stay current with your technical knowledge and skills?** It’s a good idea to list online content you use to educate yourself, as well as tutorials and conferences you have attended to gain more knowledge. Perhaps you have also worked closely with vendors or attended sessions to learn about new product features.\n- **How do you troubleshoot technical problems?** Discuss the steps you take when you are answering a question. This will give employers a sense of how you problem-solve, and it provides a good overview of how well you understand the relevant concepts. Even if you don’t answer a question correctly, it will show the interviewer your process and reasoning, which are also important. 
You can mention resources you use, such as GitLab and Stack Exchange, as well as the developer community and any publications you read for advice.\n- **What is your level of experience with the software programs mentioned on your resume?** Describe how many years you have used the tools, your impressions of them, and bring up the companies you used them at, with specific examples.\n- **What programming language are you most proficient in?** You should discuss how you have become proficient in this language and why it is the one you are most comfortable using. You can also cite other languages you are familiar with.\n- **Describe a time you made an error and how you resolved it.** Don’t use an example of an egregious error since that may put you in a negative light. Be sure to emphasize that you took responsibility and acted with integrity, and did whatever it took to resolve the issue.\n\n## What are some soft skills and coding skills to highlight in a technical interview\n\nA technical interview assesses your technical expertise, coding skills, and ability to fit into a team. However, soft skills are just as important and often aid in the development of more technical skills – particularly in a team setting.\n\nAs the technical interview progresses, be prepared to tackle some questions about soft skills like:\n\n- **Communication skills:** How does the candidate contribute to group discussions, confront problems, or give and receive feedback?\n- **Organizational skills:** What are the ways in which the candidate provides visibility into their work processes and their methods of staying on task?\n- **Collaboration skills:** Are they interested in helping their teammates? What do they think are the keys to successfully navigating a team project? How have they collaborated on past projects?\n- **Creative problem solving:** How do they work through a problem in a project? 
Do they use both analytical and creative thinking to come up with solutions?\n\n### How to prepare for verbal technical questions\n\nThere are countless articles online that try to prepare job candidates for a verbal technical interview, but whether this method is truly effective for evaluating the technical competency of a software engineer is debatable.\n\nIn the typical scenario, the interviewer asks the candidate to describe a technical concept and tries to measure their fluency in said concept based on the quality of the conversation.\n\nThe advantage of this method is that the interviewer can understand how the candidate communicates, which is of particular importance when the engineering team is all-remote, as is the case at GitLab. The drawback? Being a good communicator does not necessarily mean the candidate knows how to code effectively.\n\n\"So I've interviewed candidates that could talk the talk, but they couldn't really write the code,” says Clement. \"And that's not a great situation for an engineer to join GitLab.\" Clement’s team has moved away from using verbal technical questions as a method for evaluating candidates.\n\n### Live coding exercises\n\nOne of the more popular methods for evaluating engineers is through live coding. While it allows the evaluator to see how engineering candidates answer data structure questions, it also has its disadvantages.\n\nA key advantage of live coding data structures is that it offers a fairly consistent measurement and evaluation.\n\n\"I can talk to another manager or another interviewer and be able to communicate, 'Hey, this person wasn't able to do a linked list, they got stuck here. They weren't able to understand a runtime efficiency here.' So it's pretty consistent,\" says Clement.\n\nBut the ability to create data structures is not always the best indicator of ability. 
Oftentimes engineers with a very traditional background or recent graduates will shine here, but someone who is more senior and able to do a lot of great things, but is perhaps not as brushed up on data structures, may struggle.\n\nLive coding interviews probably aren’t going anywhere fast, but the pitfalls of this method are well documented by engineers and hiring managers. Brennan Moore, a product engineer in New York City, explains why he does not conduct live coding interviews when evaluating a prospective candidate:\n\n> \"Much like the SAT when applying for college, live coding is a structured test. I didn’t go to a school that trained me to do live coding, and so will probably fail the test. As I’ve experienced it, live coding isn’t the meritocratic space that it pretends to be. Live coding interviews weed out the people who are good at live coding interviews,\" says Brennan in his [blog post](https://www.zamiang.com/post/why-i-don-t-do-live-coding-interviews).\n\nAt GitLab, we found that live coding exercises don't accurately represent engineering capability. Oftentimes, a recent computer science graduate will outperform a more senior candidate with a lot of valuable experience. In summary, live coding exercises will often disadvantage more senior candidates, people who are nervous in high-pressure situations (read: everyone), and advantage more junior engineers or people who have practiced live coding.\n\n### Digital prompt\n\nA third common method for evaluating candidates is to ask the engineer to code a UI using an online editor while on screen share with the evaluator.\n\nThe advantage of this method is that it allows the evaluator to observe how a candidate builds. The drawbacks here are similar to those with live coding. First, the engineer is under pressure to build while the evaluator watches on, making it a nerve-wracking situation. 
The other drawbacks come from an evaluation perspective: It is challenging to measure the effectiveness of this method and it is hard to compare between candidates.\n\n### Take-home project\n\nAny engineer (or writer, for that matter) can tell you, the supplemental take-home project is a very common ask when going through an interview process. The advantage here for us is that this assignment closely mimics the reality of building environments while working remotely at GitLab.\n\nBut this task comes with major drawbacks, mainly that it disadvantages candidates who may not have the time or capacity to complete the project.\n\n\"... imagine a scenario where you're a single parent and you have kids; you may not have as much opportunity to take dedicated time, a couple of hours after work to really focus on a take-home project compared to someone from a more privileged background,\" says Clement. \"They might be able to dedicate and output something better.\"\n\n[Diversity and inclusion is a core value](/company/culture/inclusion/) for GitLab, and anything that disadvantages candidates from underrepresented groups is not inclusive, and therefore suboptimal for evaluating candidates based on their engineering abilities.\n\n## What are they looking for during a technical interview?\n\nCompanies want candidates who can discuss the industry in the context of the job they are applying for. Be prepared to discuss examples of your work. 
Many will want to hear about soft skills, too—your ability to communicate and collaborate and work with others to problem-solve issues.\n\nThey will also want to see how passionate and enthusiastic you are and whether you have the self-motivation to not only do the job but take the initiative to do more than what you’re tasked with.\n\nAlso, interviewers will want to see whether candidates have the desire to increase their technical knowledge.\n\n## What are some online preparation tools and resources for technical interviews\n\n- Indeed offers a career guide to [help prepare for](https://www.indeed.com/career-advice/interviewing/what-is-a-technical-interview) a technical interview.\n- Interview Kickstart has several [webinars](https://learn.interviewkickstart.com/) to help prepare engineers for interviews.\n- Udemy offers a course in [Technical Interview Skills](https://www.udemy.com/course/technical-interview-skills/?utm_source=bing&utm_medium=udemyads&utm_campaign=BG-DSA_Webindex_la.EN_cc.BE&utm_content=deal4584&utm_term=_._ag_1222657343651662_._ad__._kw_udemy_._de_c_._dm__._pl__._ti_dat-2328215871879260%3Aloc-190_._li_103429_._pd__._&matchtype=b&msclkid=9f5132d9c84c17b02f7951a4f46279d6).\n- [Codecademy](https://www.codecademy.com/learn/technical-interview-practice-python?utm_id=t_kwd-79027793284383:loc-190:ag_1264438993811076:cp_370314525:n_o:d_c&msclkid=550de1275d811b2cfc0f82592b6d9626&utm_source=bing&utm_medium=cpc&utm_campaign=US%20Language%3A%20Pro%20-%20Broad&utm_term=%2Btechnical%20%2Binterview%20%2Bprep&utm_content=technical%20interview%20practice) also offers a course called - Technical Interview Practice with Python.\n- Here are some more general [interview tips](https://www.roberthalf.com/blog/job-interview-tips/interview-tips-to-help-you-land-the-job-you-want) that are applicable to all candidates.\n\n## Meaningful questions to ask the interviewer\n\nCandidates will also be given a chance to ask questions they might have to learn more about the 
company. This is a great opportunity to gain more insight into how the company operates, what its philosophy is, and its vision for the long term.\n\nIt’s also a good way to glean how the company views its IT team. If you don’t ask questions, that could give the impression you are unprepared or not terribly interested in the job.\n\nQuestions to ask can include:\n\n- What does a typical day look like in this role?\n- Are there opportunities for training and further advancement?\n- What software development methodology do you use?\n- What are your code review practices?\n- Do you have on-call rotations? If so, how long is one rotation?\n- What are the responsibilities of the person on call?\n- Please provide more details about the team I will be working with, such as how many people are there, what their roles are, what the hierarchy is, and what areas of improvement you would like to see on the team.\n\n## The new way\n\nWhile each method for conducting a technical interview comes with advantages, there are also numerous disadvantages when it comes to conducting an effective and measurable evaluation and creating an equitable interview process. Under the guidance of Clement, the [Monitor:Health team](/handbook/engineering/development/ops/monitor/respond/) decided to interview frontend engineers in an entirely new way using GitLab.\n\nNow let's take a deep dive into the nuts and bolts of reinventing the technical interview for frontend engineers at GitLab. Just wondering about the key takeaways? [Skip ahead](#why-this-new-model-for-technical-interviews-is-better). 
As we continue to iterate on a more effective and measurable technical interview process, we hope this inspires other engineering organizations to rethink theirs and share learnings with us.\n\nOur first step: Standardize the interview process.\n\n### Fixing an MR on a test project\n\nThe team standardized the interview process by creating an open source test project, called `project-seeder`, which seeds projects to different candidates using a GitLab Bot. Candidates are assigned a merge request to troubleshoot in the project created for the technical interview. The `project-seeder` is powered by the GitLab Bot so the interviewer doesn't have to worry about API keys, and works in four steps:\n\n1. Exports the template project\n2. Imports template project\n3. Adds users with expiration\n4. Triggers pipeline for candidate to review MR\n\nThe candidate is sent an email with a link to the MR the candidate is assigned to fix as part of the technical interview.\n\n### Standardize the evaluation rubric\n\nThe team also created a standardized rubric for how the candidate's performance on a technical interview is evaluated.\n\n\"We don't want to be in a situation where unconscious bias or bias of one candidate over another plays a part because of our preconceived notions,\" says Clement.\n\nCreating a rubric that looks at multiple categories allows the evaluator to look at the performance of the candidate from a more holistic perspective, as opposed to looking at a candidate's performance on one technology.\n\nThe team created a [Periscope dashboard](/handbook/engineering/frontend/interview-metrics/) to create a feedback loop between the candidates and evaluators to identify opportunities for improvement in the technical interviewing process.\n\n![Frontend team used Periscrope to collect feedback from candidates who participate in technical interviews](https://about.gitlab.com/images/blogimages/fei_periscopedashboard.jpg){: .shadow.medium.center}\n\nThe frontend engineering 
team used Periscope to collect feedback from candidates who participate in technical interviews.\n{: .note.text-center}\n\n## Demoing the technical interview\n\n### Inside the technical interview project\n\nClement created a sample project to demonstrate how we use GitLab to power our technical interviews.\n\nIn the [gl-commit-example](https://gitlab.com/gl-commit-example) group, there is a subgroup with all the interview projects we are seeding to the imaginary candidates, a template, and a project seeder.\n\n![A screenshot of the sample project shows the interview project's subgroup, template, and project seeder application](https://about.gitlab.com/images/blogimages/fei_interviewproject.jpg){: .shadow.medium.center}\n\nThe interview project's subgroup, template, and project seeder application lives inside the sample project for the technical interview.\n{: .note.text-center}\n\n[Inside the template](https://gitlab.com/gl-commit-example/template), there are GitLab pages and the [interview test merge request](https://gitlab.com/gl-commit-example/template/-/merge_requests/1).\n\nThe assignment here is pretty simple. 
The candidate needs to update the website to say \"Hello GitLab Commit SF,\" but in order to accomplish this, the candidate will need to fix the failing pipeline.\n\n### Powering project-seeder\n\nWe use variables from GitLab CI to configure the [project-seeder application](https://gitlab.com/gl-commit-example/project-seeder).\n\n![Screenshot of the project for the project-seeder application](https://about.gitlab.com/images/blogimages/fei_projseederapp.jpg){: .shadow.medium.center}\n\nInside the project-seeder application which seeds the interview projects to job candidates.\n{: .note.text-center}\n\n\"I'm creating `new-project-example-two`, and I'm adding this bot user that I created and the expiration, so I can just easily run this pipeline and it'll seed this project,\" says Clement.\n\n![We use variables from the GitLab CI to configure the project-seeder applications](https://about.gitlab.com/images/blogimages/fei_variables.jpg){: .shadow.medium.center}\n\nThe next step is to run the setup pipeline, which will create the project, import the project, export the project, and share it with the job candidate.\n\n![A look inside the pipeline that will create the test project](https://about.gitlab.com/images/blogimages/fei_insidethepipeline.jpg){: .shadow.medium.center}\nA look inside the pipeline that will create the test project.\n{: .note.text-center}\n\nLooking inside example-one, we can see there is a project and [broken MR](https://gitlab.com/gl-commit-example/interview-projects/example-1/-/merge_requests/1).\n\n\"And an example for a candidate – they would probably look at the CI and see, 'Oh there's a failing test. Let's see what that's about. Oh, it looks like it's checking for \"hello world\". 
So since we changed the message earlier, we can just change this and get this test passing and then pass this interview,'\" says Clement.\n\n## Why this new model for technical interviews is better\n\nThe new model surpasses the old model because we created realistic scenarios that reflect what it's like to actually work for GitLab, and we established a more consistent method of measurement.\n\n\"So we're able to get better candidates overall. Candidates that pass through this technical interview, we're sure that they're going to be successful at GitLab,\" says Clement.\n\nBy designing our technical interviews this way, we can ensure that the interview project matches our actual product architecture at GitLab, which in this case is Ruby on Rails for Vue JS.\n\nWe also struggled in the past with finding a good way to check that the candidate knows how to use Git, and can navigate pipelines and testing. By using GitLab for interviews, we're able to confirm a candidate's competency with Git implicitly by evaluating their performance on the technical interviews.\n\nWe wanted to mirror the actual experience of troubleshooting a broken MR while working at GitLab, so we allow our candidates to use the internet during their technical interview. This allows the evaluator to see how the candidate solves problems and see their resourcefulness.\n\n\"If you're already using GitLab for your tooling, you're just exposing them to what it's like to work at GitLab; it's a more accurate representation,\" says Clement. \"And you can also make sure you're measuring testing proficiency and you make sure they understand how that works before they join your company.\"\n\n## Four key takeaways from our technical interview update\n\nWhether or not a company uses GitLab, there are a few key lessons that we learned by iterating on how we conduct technical interviews for engineers.\n\n1. 
**Make technical interviews as much like real work as possible**: Nine times out of ten, an engineering manager isn't going to sit back and watch an engineer break a sweat in a live coding exercise, any more than they will watch on as an engineer builds in UI. Create realistic scenarios based on the actual work and evaluate based on the candidate's performance.\n\n2. **Make any technical interview process \"open-book\"**: Engineering doesn't involve much rote memorization. Instead, allow the engineering candidate to use the internet (and in our case, the [GitLab Handbook](https://handbook.gitlab.com/handbook/)) to look up their questions. It's better to see how a candidate applies their knowledge and troubleshoots the inevitable problems that may arise. This change will likely improve your candidate experience too.\n\n3. **Standardize your rubric**: However the technical interview is done, make sure that the rubric is as objective as possible and that the candidate is evaluated based on various criteria, not on their familiarity with a particular technology. A strong rubric means a stronger, more valid method for evaluating candidate performance.\n\n4. **Create an inclusive process**: Think critically about how the technical interviewing process and evaluation is structured so a diverse group of candidates can be recruited and evaluated based on their merits. When in doubt, ask a diversity, inclusion and belonging expert or turn to your human resources team for advice. Still coming up empty? Hire a diversity consultant; it will be worth it.\n\n**Interviewing at GitLab?** We encourage you to use the resources GitLab creates during your technical interview. 
We don't publish our evaluation criteria publicly, but we do have the [Periscope dashboard](/handbook/engineering/frontend/interview-metrics/) which can provide some insight.\n\nWatch Clement's talk from [GitLab Commit San Francisco](https://www.youtube.com/watch?v=jSbCt8b_4ug) to learn more about how we used GitLab to power our technical interviewing process.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/jSbCt8b_4ug\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[9,804],{"slug":1103,"featured":6,"template":699},"the-trouble-with-technical-interviews","content:en-us:blog:the-trouble-with-technical-interviews.yml","The Trouble With Technical Interviews","en-us/blog/the-trouble-with-technical-interviews.yml","en-us/blog/the-trouble-with-technical-interviews",{"_path":1109,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1110,"content":1116,"config":1124,"_id":1126,"_type":13,"title":1127,"_source":15,"_file":1128,"_stem":1129,"_extension":18},"/en-us/blog/understanding-and-improving-total-blocking-time",{"title":1111,"description":1112,"ogTitle":1111,"ogDescription":1112,"noIndex":6,"ogImage":1113,"ogUrl":1114,"ogSiteName":685,"ogType":686,"canonicalUrls":1114,"schema":1115},"Total Blocking Time - The metric to know for faster website performance","Learn how to identify and fix some root causes for high Total Blocking Time.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682637/Blog/Hero%20Images/tbt_cover_image.jpg","https://about.gitlab.com/blog/understanding-and-improving-total-blocking-time","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Total Blocking Time - The metric to know for faster website performance\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacques Erasmus\"}],\n        \"datePublished\": 
\"2023-02-14\",\n      }",{"title":1111,"description":1112,"authors":1117,"heroImage":1113,"date":1119,"body":1120,"category":802,"tags":1121},[1118],"Jacques Erasmus","2023-02-14","\n\nOur world overwhelms us with information that is more accessible than ever. The increasing rates of content production and consumption are gifts that keep on giving. We can't seem to keep up with the information thrown at us. We're limited by our cognitive limitations and time constraints, and a [recent study](https://www.nature.com/articles/s41467-019-09311-w) concluded the result is a shortening of attention spans. Websites are no exception.\n\nUsers who interact with your website want feedback, and want it fast. Preferably immediately! Website performance has become an important factor in keeping users engaged. But how do you measure how unresponsive a page is before it becomes fully interactive?\n\nMany [performance metrics](https://web.dev/vitals/) exist, but this blog post focuses on Total Blocking Time (TBT).\n\n## What is Total Blocking Time?\n\nTBT measures the total amount of time tasks were blocking your browser's main thread. This metric represents the total amount of time that a user could not interact with your website. It's measured between [First Contentful Paint (FCP)](https://web.dev/fcp/) and [Time to Interactive (TTI)](https://web.dev/tti/), and represents the combined blocking time for all long tasks.\n\n## What is a long task?\n\nA long task is a process that runs on the main thread for longer than 50 milliseconds (ms). After a task starts, a browser can't interrupt it, and a single long-running task can block the main thread. The result: a website that is unresponsive to user input until the task completes.\n\nAfter the first 50 ms, all time spent on a task is counted as _blocking time_. This diagram shows five tasks, two of which block the main thread for 140 ms:\n\n![A diagram containing five tasks, two of which are blocking the main thread. 
The TBT for these tasks adds up to 140 ms.](https://about.gitlab.com/images/blogimages/tbt/tasks_diagram.png)\n\n## How can we measure TBT?\n\nMany tools measure TBT, but here we’ll use [Chrome DevTools](https://developer.chrome.com/docs/devtools/evaluate-performance/) to analyze runtime performance.\n\nAs an example: We recently improved performance on GitLab's [**View Source** page](https://gitlab.com/gitlab-org/gitlab/-/blob/master/.gitlab-ci.yml). This screenshot, taken before the performance improvement, shows eight long-running tasks containing a TBT of **2388.16 ms**. That's more than **two seconds**:\n\n![A screenshot indicating that there are eight long-running tasks. The TBT of these tasks adds up to 2388.16 ms.](https://about.gitlab.com/images/blogimages/tbt/summary_before.png)\n\n## How can we improve TBT?\n\nAs you might have guessed by now, reducing the time needed to complete long-running tasks reduces TBT.\n\nBy selecting one of the tasks from the previous screenshot, we can get a breakdown of how the browser executed it. This **Bottom-Up** view shows that much time is spent on rendering content in the Document Object Model (DOM):\n\n![A screenshot of the Bottom-Up view of one of tasks from the previous screenshot. It indicates that most of the time is being spent on rendering content in the DOM.](https://about.gitlab.com/images/blogimages/tbt/task_7_before.png)\n\nThis page has a lot of content that is below the fold – not immediately visible. The browser is spending a lot of resources upfront to render content that is not even visible to the user yet!\n\nSo what can we do? 
Some ideas:\n\n- **Change the UX.**\n  - Add a Show More button, paging, or virtual scrolling for long lists.\n- **Lazy-load images.**\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/65745))\n    - Lazy-loading images reduces page weight, allowing the browser to spend resources on more important tasks.\n- **Lazy-load long lists.**\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/71633))\n    - Similar to lazy-loading images, this approach allows the browser to spend resources on more important tasks.\n- **Reduce excessive HTML.**\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/65835))\n    - For example, when loading large pages consider removing unnecessary content. Or, consider rendering some content (like icons) with CSS instead.\n- **Defer rendering when possible.**\n    - The [`content-visibility: auto;`](https://developer.mozilla.org/en-US/docs/Web/CSS/content-visibility) CSS property ensures the rendering of off-screen elements (and thus irrelevant to the user) is skipped without affecting the page layout. ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67050))\n    - The [Intersection Observer API](https://developer.mozilla.org/en-US/docs/Web/API/Intersection_Observer_API) allows you to observe when elements intersect with the viewport. This information can be used to show or hide certain elements. ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/71633))\n    - The global [`requestIdleCallback` method](https://developer.mozilla.org/en-US/docs/Web/API/Window/requestIdleCallback?qs=requestIdleCallback) can be used to render content after the browser goes into an idle state.\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101942/diffs#7eed73783787184e5b1c029b9668e48638f3a6e8_64_78))\n\nFrameworks such as VueJS and React are already heavily optimized. 
However, be mindful of how you use these frameworks to avoid expensive tasks.\n\n### Change VueJS usage to improve TBT\n\nThis screenshot shows the **Bottom-Up** view of a task. Much of the task time is spent on activities from third-party code in the VueJS framework:\n\n![A screenshot of the Bottom-Up view of one of tasks. It indicates that a lot of the time is being spent on activities in the third-party VueJS framework.](https://about.gitlab.com/images/blogimages/tbt/task_6_before.png)\n\nWhat improvements can we make?\n\n- **Use [Server-side rendering (SSR)](https://gitlab.com/gitlab-org/gitlab/-/issues/215365) or [streaming](https://gitlab.com/gitlab-org/frontend/rfcs/-/issues/101)** for pages that are sensitive to page load performance.\n- **If you don't _need_ Vue, don't use it.**\n  Component instances are a lot more expensive than using plain DOM nodes. Try to avoid unnecessary component abstractions.\n- **Optimize component [props](https://vuejs.org/guide/components/props.html).**\n  Child components in Vue update when at least one of their received props are being updated. Analyze the data that you pass to components. You may find that you can avoid unnecessary updates by making changes to your props strategy.\n- **Use [v-memo](https://vuejs.org/api/built-in-directives.html#v-memo) to skip updates.**\n    - In Vue versions 3.2 and later, `v-memo` enables you to cache parts of your template. The cached template updates and re-renders only if one of its provided dependencies changes.\n- **Use [v-once](https://vuejs.org/api/built-in-directives.html#v-once) for data** that does not need to be reactive after the initial load.\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101942))\n    - `v-once` ensures the element and component are only rendered once. Any future updates will be skipped.\n- **Reduce expensive tasks in your Vue components.**\n  Even a small script may take a long time to finish if it’s not optimized enough. 
Some suggestions:\n    - By using [`requestIdleCallback`](https://developer.mozilla.org/en-US/docs/Web/API/Window/requestIdleCallback?qs=requestIdleCallback) you can defer the execution of the non-critical tasks. ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101942/diffs#7eed73783787184e5b1c029b9668e48638f3a6e8_64_78))\n    - By executing expensive scripts in [WebWorkers](https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Using_web_workers) you can unblock the main thread.\n\n### Results and methods\n\nBy using three of the methods suggested above, we reduced TBT from about **3 seconds** to approximately **500 ms**:\n\n![A chart indicating a drop in TBT from ~3 seconds to ~500 milliseconds.](https://about.gitlab.com/images/blogimages/tbt/chart_after.png)\n\nWhat did we do?\n\n- Deferred rendering by using the [`content-visibility: auto;`](https://developer.mozilla.org/en-US/docs/Web/CSS/content-visibility) CSS property.\n- Deferred rendering by using the [Intersection Observer API](https://developer.mozilla.org/en-US/docs/Web/API/Intersection_Observer_API).\n- Used [v-once](https://vuejs.org/api/built-in-directives.html#v-once) for content that didn't need to be reactive after rendering.\n\nRemember, the size of the decrease always depends on how optimized your app already is to begin with.\n\nThere is a lot more we can do to improve TBT. While the specific approach depends on the app you're optimizing, the general methods discussed here are very effective at finding improvement opportunities in any app. Like most things in life, a series of the smallest changes often yield the biggest impact. 
So let's [iterate](/blog/dont-confuse-these-twelve-shortcuts-with-iteration/) together, and adapt to this ever-changing world.\n\n> “Adaptability is the simple secret of survival.” – Jessica Hagedorn\n\n_Cover image by [Growtika](https://unsplash.com/@growtika?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/Iqi0Rm6gBkQ?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_\n",[9,1122,1123],"performance","tutorial",{"slug":1125,"featured":6,"template":699},"understanding-and-improving-total-blocking-time","content:en-us:blog:understanding-and-improving-total-blocking-time.yml","Understanding And Improving Total Blocking Time","en-us/blog/understanding-and-improving-total-blocking-time.yml","en-us/blog/understanding-and-improving-total-blocking-time",{"_path":1131,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1132,"content":1138,"config":1144,"_id":1146,"_type":13,"title":1147,"_source":15,"_file":1148,"_stem":1149,"_extension":18},"/en-us/blog/why-do-gitlab-designers-contribute-to-the-codebase",{"title":1133,"description":1134,"ogTitle":1133,"ogDescription":1134,"noIndex":6,"ogImage":1135,"ogUrl":1136,"ogSiteName":685,"ogType":686,"canonicalUrls":1136,"schema":1137},"Why do GitLab designers contribute to the codebase?","This article is not another blog post about whether designers should code. 
Instead, it's the perspective of a GitLab designer learning to contribute.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679556/Blog/Hero%20Images/insights.png","https://about.gitlab.com/blog/why-do-gitlab-designers-contribute-to-the-codebase","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why do GitLab designers contribute to the codebase?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Austin Regnery\"}],\n        \"datePublished\": \"2021-03-17\",\n      }",{"title":1133,"description":1134,"authors":1139,"heroImage":1135,"date":1141,"body":1142,"category":1057,"tags":1143},[1140],"Austin Regnery","2021-03-17","\n\n\n\nWorking with engineering in the past used to feel so foreign to me. I never truly understood all the complexities of collaborative software development, and in full transparency, I still don’t. However, using GitLab has taught me how to contribute to the success of our product.\n\n## We believe everyone can contribute\n\nAt GitLab, one of our [goals](https://about.gitlab.com/company/mission/#goals) is to ensure that everyone can contribute to GitLab the application and the company. To help share this working knowledge, everyone that works at GitLab must add themselves to the [team page](https://about.gitlab.com/company/team/). Conquering this development task can be daunting because there are new terms and lots of steps. However, our [documentation](https://about.gitlab.com/handbook/git-page-update/#12-add-yourself-to-the-team-page) does a great job of helping reduce the barrier of entry.\n\n## Develop shared empathy\n\nI never had access to the codebase in previous product teams because it was far too time-consuming to get a build environment configured on my computer. For GitLab, this is a requirement so that I can review changes before they go to production. 
During onboarding, I invested a decent amount of time setting up the [GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/master/README.md) (gdk) on my computer. It was a challenge, but now I know why getting developers up and running can be incredibly complex. I can even more greatly appreciate the [GitPod integration](https://docs.gitlab.com/ee/integration/gitpod.html), which does all the heavy lifting of setup for you in minutes.\n\n![GitPod + GitLab = Love](https://about.gitlab.com/images/blogimages/why-do-gitlab-designers-contribute-to-the-codebase/teaser-gitlab-gitpod.jpg)\nSource: [GitLab Support for Gitpod is Here](https://www.gitpod.io/blog/gitlab-support/)\n{: .note.text-center}\n\nOnce my setup was ready to go, I was able to jump in. When I started at GitLab there was a [quarterly goal](https://gitlab.com/groups/gitlab-org/-/epics/3914) to migrate our button components to the new front-end we were using. I migrated several buttons, but [the one I am most proud of](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/46990) required me to step into an area of GitLab I was completely unfamiliar with. I had to get a niche button to verify that my changes were correct, which required me to learn how to get [Terminal working in the GitLab WebIDE](https://docs.gitlab.com/ee/user/project/web_ide/#interactive-web-terminals-for-the-web-ide). Then, I reached out to other designers and team members to get my [runners](https://docs.gitlab.com/runner/) to function correctly. This helped me understand more complex areas of GitLab better than just reading the documentation. It is one thing to read about something, and a totally different beast to make something work yourself.\n\nThis idea of [dogfooding](https://handbook.gitlab.com/handbook/values/#dogfooding) is something we uphold as a sub-value at GitLab. By using GitLab to contribute to GitLab the application and company, we put ourselves in our users’ shoes. 
If we don’t like something then we are that much more motivated to change it.\n\n## Diversify skill sets\n\nAs a designer, I want to have a functional understanding of the frontend framework I am designing within. Having the basics down allowed me to communicate expectations, minimize assumptions, and ask insightful questions. Working through the initial learning curve has helped me tremendously in the coming months for scoping designs and working alongside engineering.\n\nSometimes rather than just sending a mockup to my engineers, I’ll open a Merge Request to propose a change instead. For example, I could have asked them to update the border color of a table, but I discovered removing an extra CSS class would fix the problem; submitting that change was much faster than creating a mockup and chatting about it asynchronously.\n\nIt's much easier to get input from engineers if you are talking about something specific in the codebase, instead of something more nebulous like border colors. Engineers will have to dig into the codebase to reference what is there, so help save them a step if you can. Discussing an explicit change is actionable, which is why we say [everything starts with a merge request](https://handbook.gitlab.com/handbook/communication/start-with-a-merge-request).\n\n> ### \"It's much easier to get input from engineers if you are talking about something specific in the codebase.\"\n\nDoing smaller and simpler changes made me comfortable trying more complex ideas like [replacing label colors with common names](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/50393). Each merge request taught me something new. I learned more about keeping my code formatting clean, writing good commit messages, and how to resolve failing pipelines. All things that contributors to GitLab must learn at some point in time.\n\nAs I learn about the different nuances that come with various pages in GitLab, I rely less on asking questions because I can look them up myself. 
For example, it is not always visually identifiable in the GitLab UI if a page is coded in HAML or Vue. Before I suggest a change or even start designing in some cases, I look for these differences in the codebase. Touching HAML can be more complicated than working with the Vue components documented in Pajamas.\n\nFor User Research, I can use these small changes for [Short Tests](https://help.usertesting.com/hc/en-us/articles/360055473112-Short-Tests-Beta-) instead of using complex prototypes in Figma. Using research to drive decision-making can help reduce subjective bias for implementing an idea.\n\n![Comparing a change before and after in a Merge Request](https://about.gitlab.com/images/blogimages/why-do-gitlab-designers-contribute-to-the-codebase/before-after.png)\nTesting a live environment can be useful for validating changes - [View Merge Request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/51753)\n{: .note.text-center}\n\nNot only can I make more informed design decisions, but I can also contribute ideas that others are excited about. I am actively working on two:\n\n- [Add UX reviewer/maintainer to Danger bot](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/51127)\n- [Add a shortcut for collapsible section markdown](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/54938)\n\nI started working on both ideas while I was between meetings, and I have continued to progress them along ad-hoc. It has been fun to see some of my ideas make it into GitLab, but I also have a nice collection of [scrapped ideas](https://gitlab.com/dashboard/merge_requests?scope=all&utf8=%E2%9C%93&state=closed&author_username=aregnery).\n\n## Conclusion\n\nI have already come a long way from making my first Merge Request at GitLab. I thought I knew the basics of git, but going through this process helped me get my feet wet with more complex development and working in a single repository. 
I learned about having others review and approve my changes, the magic of seeing checkmarks for all pipelines, and finally, the Merge Request badge turning from Open to Merged.\n\nIf you are interested in learning how I create small merge requests, then watch this walkthrough of my process.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n    \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/PnxHQGpFD1w\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n",[848,1012,1013,1081,9],{"slug":1145,"featured":6,"template":699},"why-do-gitlab-designers-contribute-to-the-codebase","content:en-us:blog:why-do-gitlab-designers-contribute-to-the-codebase.yml","Why Do Gitlab Designers Contribute To The Codebase","en-us/blog/why-do-gitlab-designers-contribute-to-the-codebase.yml","en-us/blog/why-do-gitlab-designers-contribute-to-the-codebase",{"_path":1151,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1152,"content":1158,"config":1164,"_id":1166,"_type":13,"title":1167,"_source":15,"_file":1168,"_stem":1169,"_extension":18},"/en-us/blog/why-we-chose-echarts",{"title":1153,"description":1154,"ogTitle":1153,"ogDescription":1154,"noIndex":6,"ogImage":1155,"ogUrl":1156,"ogSiteName":685,"ogType":686,"canonicalUrls":1156,"schema":1157},"Why we chose ECharts for data visualizations","Learn why GitLab switched from D3.js to ECharts as our library of choice for rendering data visualizations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666699/Blog/Hero%20Images/banner.jpg","https://about.gitlab.com/blog/why-we-chose-echarts","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we chose ECharts for data visualizations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Clement Ho\"}],\n        \"datePublished\": \"2019-09-30\",\n      
}",{"title":1153,"description":1154,"authors":1159,"heroImage":1155,"date":1161,"body":1162,"category":802,"tags":1163},[1160],"Clement Ho","2019-09-30","\nAs GitLab continues to grow in depth and breadth across the [DevOps lifecycle](/topics/devops/), the use of charts and data visualizations has increased in frequency and complexity. Throughout the life of GitLab as a project, we've used multiple libraries to render beautiful charts. As the number of different libraries increased along with our charting requirements, we decided it was time to start unifying our charting libraries to help us move quickly.\n\nAt first, we wanted to unify our charts using D3.js but this was difficult because D3.js isn't a charting library. In their own words: \"D3.js is a JavaScript library for manipulating documents based on data,\" meaning it is a low level visualization tool. D3.js is powerful but it has a big learning curve. Our team did not have the time to develop the expertise without impacting our product development velocity. We also knew we had an ambitious hiring plan, and we would be adding time to our onboarding process by using D3.js.\n\nThe frontend team set out to investigate different charting libraries that we could use to gain more velocity. The library didn't have to do everything we needed, but it had to get us most of the way there. We investigated many libraries including ECharts, Britecharts, and Plotly as potential options. In the end, ECharts was the clear winner for us. Here's why:\n\n## Echarts robust yet flexible chart types\nOn the monitor stage frontend team, we have the [ambitious goal of replacing well-known monitoring tools like DataDog and Grafana](/direction/monitor/). 
It was absolutely critical that our charting library had enough flexibility for us to create our own custom charts, but it was also important that the library had existing charts so that we didn’t have to create every chart from scratch for the sake of development velocity.\n\nECharts has an [incredible showcase](https://echarts.apache.org/examples/en/) of the adaptability of their charts. This was a great starting point for us. We tested out styling ECharts to match our design system to determine how adaptable it was and we were very satisfied with the results.\n\n![design](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/design.png)\n*Design spec for future GitLab charts.*\n\n![implementation](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/implementation.png)\n*Evaluation implementation using ECharts.*\n\n## Echarts performance\nWhen we were evaluating ECharts, we took one of our most complex user interactions for charts to benchmark the performance of the charting library. Although ECharts wasn’t perfect, it fared better than the alternatives. Below are some gifs recorded from changing the chart values in our [evaluation project](https://gitlab.com/adriel/echarts-proof-of-concept). 
As you can see, performance does decrease as the data points increase but it is still usable and it is unlikely we would have that many points in such a small chart.\n\n![10 values](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/10-points.gif)\n*Linked chart with 10 values.*\n\n![100 values](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/100-points.gif)\n*Linked chart with 100 values.*\n\n![1000 values](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/1000-points.gif)\n*Linked chart with 1000 values.*\n\n![4000 values](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/4000-points.gif)\n*Linked chart with 4000 values.*\n\n## Growing ecosystem\n\nECharts isn’t perfect but it has [improved over time](https://incubator.apache.org/projects/echarts.html). It started off as an [open source project from Baidu](https://whimsy.apache.org/board/minutes/ECharts.html) but is still going through the process of being incubated into the Apache Software Foundation. The [majority of ECharts users still seem to be based in China](https://echarts.apache.org/en/committers.html), meaning the developer community and corresponding documentation is written primarily in Chinese. Despite some language barriers, the ECharts community does seem to be growing more internationally. We’ve come across a variety of companies from the United States and Mexico who are either evaluating or using ECharts internally.\n\nThe Podling Project Management Committee (PPMC) of ECharts, which is their core team in GitLab terms, has also been very welcoming and energetic about growing the ecosystem. As we decided on ECharts and began developing new charts and replacing old charts, we’ve been able to build a partnership with the company. They have been very kind to meet with us online every month to help answer questions and to guide us in using their library effectively. This has been extremely helpful. 
For example, during one of our meetings, Shuang Su gave us a brief walkthrough of the codebase and its architecture.\n\n## Where we are today with Echarts\n\nWe introduced [ECharts to the GitLab codebase in 11.6](https://gitlab.com/gitlab-org/gitlab-ce/issues/53147) and through ECharts have been rapidly building new chart types into our component library at a faster rate than ever before. We started with updating the charts in just our Monitor stage but have since introduced charts into the [Secure](https://gitlab.com/gitlab-org/gitlab-ee/issues/6954) and [Manage](https://gitlab.com/gitlab-org/gitlab-ee/issues/12079) stages.\n\nDepending on your use case, Apache ECharts could be a good fit for you too. For our team, ECharts has without a doubt increased our product development velocity over what it was with D3.js.\n\n| Old chart in D3.js | New chart in ECharts |\n|",[804,1014,9],{"slug":1165,"featured":6,"template":699},"why-we-chose-echarts","content:en-us:blog:why-we-chose-echarts.yml","Why We Chose Echarts","en-us/blog/why-we-chose-echarts.yml","en-us/blog/why-we-chose-echarts",{"_path":1171,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1172,"content":1178,"config":1185,"_id":1187,"_type":13,"title":1188,"_source":15,"_file":1189,"_stem":1190,"_extension":18},"/en-us/blog/why-we-spent-the-last-month-eliminating-postgresql-subtransactions",{"title":1173,"description":1174,"ogTitle":1173,"ogDescription":1174,"noIndex":6,"ogImage":1175,"ogUrl":1176,"ogSiteName":685,"ogType":686,"canonicalUrls":1176,"schema":1177},"Why we spent the last month eliminating PostgreSQL subtransactions","How a mysterious stall in database queries uncovered a performance limitation with PostgreSQL.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669470/Blog/Hero%20Images/nessie.jpg","https://about.gitlab.com/blog/why-we-spent-the-last-month-eliminating-postgresql-subtransactions","\n                        {\n        \"@context\": \"https://schema.org\",\n   
     \"@type\": \"Article\",\n        \"headline\": \"Why we spent the last month eliminating PostgreSQL subtransactions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Grzegorz Bizon\"},{\"@type\":\"Person\",\"name\":\"Stan Hu\"}],\n        \"datePublished\": \"2021-09-29\",\n      }",{"title":1173,"description":1174,"authors":1179,"heroImage":1175,"date":1182,"body":1183,"category":802,"tags":1184},[1180,1181],"Grzegorz Bizon","Stan Hu","2021-09-29","Since last June, we noticed the database on GitLab.com would\n\nmysteriously stall for minutes, which would lead to users seeing 500\n\nerrors during this time. Through a painstaking investigation over\n\nseveral weeks, we finally uncovered the cause of this: initiating a\n\nsubtransaction via the [`SAVEPOINT` SQL\nquery](https://www.postgresql.org/docs/current/sql-savepoint.html) while\n\na long transaction is in progress can wreak havoc on database\n\nreplicas. Thus launched a race, which we recently completed, to\n\neliminate all `SAVEPOINT` queries from our code. Here's what happened,\n\nhow we discovered the problem, and what we did to fix it.\n\n\n### The symptoms begin\n\n\nOn June 24th, we noticed that our CI/CD runners service reported a high\n\nerror rate:\n\n\n![runners\nerrors](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/ci-runners-errors.png)\n\n\nA quick investigation revealed that database queries used to retrieve\n\nCI/CD builds data were timing out and that the unprocessed builds\n\nbacklog grew at a high rate:\n\n\n![builds\nqueue](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/builds-queue.png)\n\n\nOur monitoring also showed that some of the SQL queries were waiting for\n\nPostgreSQL lightweight locks (`LWLocks`):\n\n\n![aggregated\nlwlocks](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/aggregated-lwlocks.png)\n\n\nIn the following weeks we had experienced a few incidents like this. 
We were\n\nsurprised to see how sudden these performance degradations were, and how\n\nquickly things could go back to normal:\n\n\n![ci queries\nlatency](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/ci-queries-latency.png)\n\n\n### Introducing Nessie: Stalled database queries\n\n\nIn order to learn more, we extended our observability tooling [to sample\n\nmore data from\n`pg_stat_activity`](https://gitlab.com/gitlab-cookbooks/gitlab-exporters/-/merge_requests/231).\nIn PostgreSQL, the `pg_stat_activity`\n\nvirtual table contains the list of all database connections in the system as\n\nwell as what they are waiting for, such as a SQL query from the\n\nclient. We observed a consistent pattern: the queries were waiting on\n\n`SubtransControlLock`. Below shows a graph of the URLs or jobs that were\n\nstalled:\n\n\n![endpoints\nlocked](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/endpoints-locked.png)\n\n\nThe purple line shows the sampled number of transactions locked by\n\n`SubtransControlLock` for the `POST /api/v4/jobs/request` endpoint that\n\nwe use for internal communication between GitLab and GitLab Runners\n\nprocessing CI/CD jobs.\n\n\nAlthough this endpoint was impacted the most, the whole database cluster\n\nappeared to be affected as many other, unrelated queries timed out.\n\n\nThis same pattern would rear its head on random days. A week would pass\n\nby without incident, and then it would show up for 15 minutes and\n\ndisappear for days. Were we chasing the Loch Ness Monster?\n\n\nLet's call these stalled queries Nessie for fun and profit.\n\n\n### What is a `SAVEPOINT`?\n\n\nTo understand `SubtransControlLock` ([PostgreSQL\n\n13](https://www.postgresql.org/docs/13/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW)\n\nrenamed this to `SubtransSLRU`), we first must understand how\n\nsubtransactions work in PostgreSQL. 
In PostgreSQL, a transaction can\n\nstart via a `BEGIN` statement, and a subtransaction can be started with\n\na subsequent `SAVEPOINT` query. PostgreSQL assigns each of these a\n\ntransaction ID (XID for short) [when a transaction or a subtransaction\n\nneeds one, usually before a client modifies\ndata](https://gitlab.com/postgres/postgres/blob/a00c138b78521b9bc68b480490a8d601ecdeb816/src/backend/access/transam/README#L193-L198).\n\n\n#### Why would you use a `SAVEPOINT`?\n\n\nFor example, let's say you were running an online store and a customer\n\nplaced an order. Before the order is fulfilled, the system needs to\n\nensure a credit card account exists for that user. In Rails, a common\n\npattern is to start a transaction for the order and call\n\n[`find_or_create_by`](https://apidock.com/rails/v5.2.3/ActiveRecord/Relation/find_or_create_by).\nFor\n\nexample:\n\n\n```ruby\n\nOrder.transaction do\n  begin\n    CreditAccount.transaction(requires_new: true) do\n      CreditAccount.find_or_create_by(customer_id: customer.id)\n    end\n  rescue ActiveRecord::RecordNotUnique\n    retry\n  end\n  # Fulfill the order\n  # ...\nend\n\n```\n\n\nIf two orders were placed around the same time, you wouldn't want the\n\ncreation of a duplicate account to fail one of the orders. Instead, you\n\nwould want the system to say, \"Oh, an account was just created; let me\n\nuse that.\"\n\n\nThat's where subtransactions come in handy: the `requires_new: true`\n\ntells Rails to start a new subtransaction if the application already is\n\nin a transaction. 
The code above translates into several SQL calls that\n\nlook something like:\n\n```sql\n\n--- Start a transaction\n\nBEGIN\n\nSAVEPOINT active_record_1\n\n--- Look up the account\n\nSELECT * FROM credit_accounts WHERE customer_id = 1\n\n--- Insert the account; this may fail due to a duplicate constraint\n\nINSERT INTO credit_accounts (customer_id) VALUES (1)\n\n--- Abort this by rolling back\n\nROLLBACK TO active_record_1\n\n--- Retry here: Start a new subtransaction\n\nSAVEPOINT active_record_2\n\n--- Find the newly-created account\n\nSELECT * FROM credit_accounts WHERE customer_id = 1\n\n--- Save the data\n\nRELEASE SAVEPOINT active_record_2\n\nCOMMIT\n\n```\n\n\nOn line 7 above, the `INSERT` might fail if the customer account was\n\nalready created, and the database unique constraint would prevent a\n\nduplicate entry. Without the first `SAVEPOINT` and `ROLLBACK` block, the\n\nwhole transaction would have failed. With that subtransaction, the\n\ntransaction can retry gracefully and look up the existing account.\n\n\n### What is `SubtransControlLock`?\n\n\nAs we mentioned earlier, Nessie returned at random times with queries\n\nwaiting for `SubtransControlLock`. `SubtransControlLock` indicates that\n\nthe query is waiting for PostgreSQL to load subtransaction data from\n\ndisk into shared memory.\n\n\nWhy is this data needed? When a client runs a `SELECT`, for example,\n\nPostgreSQL needs to decide whether each version of a row, known as a\n\ntuple, is actually visible within the current transaction. It's possible\n\nthat a tuple has been deleted or has yet to be committed by another\n\ntransaction. Since only a top-level transaction can actually commit\n\ndata, PostgreSQL needs to map a subtransaction ID (subXID) to its parent\n\nXID.\n\n\nThis mapping of subXID to parent XID is stored on disk in the\n\n`pg_subtrans` directory. Since reading from disk is slow, PostgreSQL\n\nadds a simple least-recently used (SLRU) cache in front for each\n\nbackend process. 
The lookup is fast if the desired page is already\n\ncached. However, as [Laurenz Albe discussed in his blog\n\npost](https://www.cybertec-postgresql.com/en/subtransactions-and-performance-in-postgresql/),\n\nPostgreSQL may need to read from disk if the number of active\n\nsubtransactions exceeds 64 in a given transaction, a condition\n\nPostgreSQL terms `suboverflow`. Think of it as the feeling you might get\n\nif you ate too many Subway sandwiches.\n\n\nSuboverflowing (is that a word?) can bog down performance because as\n\nLaurenz said, \"Other transactions have to update `pg_subtrans` to\n\nregister subtransactions, and you can see in the perf output how they\n\nvie for lightweight locks with the readers.\"\n\n\n### Hunting for nested subtransactions\n\n\nLaurenz's blog post suggested that we might be using too many\n\nsubtransactions in one transaction. At first, we suspected we might be\n\ndoing this in some of our expensive background jobs, such as project\n\nexport or import. However, while we did see numerous `SAVEPOINT` calls\n\nin these jobs, we didn't see an unusual degree of nesting in local\n\ntesting.\n\n\nTo isolate the cause, we started by [adding Prometheus metrics to track\n\nsubtransactions as a Prometheus metric by\nmodel](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/66477).\n\nThis led to nice graphs as the following:\n\n\n![subtransactions\nplot](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/subtransactions-plot.png)\n\n\nWhile this was helpful in seeing the rate of subtransactions over time,\n\nwe didn't see any obvious spikes that occurred around the time of the\n\ndatabase stalls. 
Still, it was possible that suboverflow was happening.\n\n\nTo see if that was happening, we [instrumented our application to track\nsubtransactions and log a message whenever we detected more than 32\n`SAVEPOINT` calls in a given\ntransaction](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67918).\nRails\n\nmakes it possible for the application to subscribe to all of its SQL\n\nqueries via `ActiveSupport` notifications. Our instrumentation looked\n\nsomething like this, simplified for the purposes of discussion:\n\n\n```ruby\n\nActiveSupport::Notifications.subscribe('sql.active_record') do |event|\n  sql = event.payload.dig(:sql).to_s\n  connection = event.payload[:connection]\n  manager = connection&.transaction_manager\n\n  context = manager.transaction_context\n  return if context.nil?\n\n  if sql.start_with?('BEGIN')\n    context.set_depth(0)\n  elsif sql.start_with?('SAVEPOINT', 'EXCEPTION')\n    context.increment_savepoints\n  elsif sql.start_with?('ROLLBACK TO SAVEPOINT')\n    context.increment_rollbacks\n  elsif sql.start_with?('RELEASE SAVEPOINT')\n    context.increment_releases\n  elsif sql.start_with?('COMMIT', 'ROLLBACK')\n    context.finish_transaction\n  end\nend\n\n```\n\n\nThis code looks for the key SQL commands that initiate transactions and\n\nsubtransactions and increments counters when they occurred. After a\n\n`COMMIT,` we log a JSON message that contained the backtrace and the\n\nnumber of `SAVEPOINT` and `RELEASES` calls. 
For example:\n\n\n```json\n\n{\n  \"sql\": \"/*application:web,correlation_id:01FEBFH1YTMSFEEHS57FA8C6JX,endpoint_id:POST /api/:version/projects/:id/merge_requests/:merge_request_iid/approve*/ BEGIN\",\n  \"savepoints_count\": 1,\n  \"savepoint_backtraces\": [\n    [\n      \"app/models/application_record.rb:75:in `block in safe_find_or_create_by'\",\n      \"app/models/application_record.rb:75:in `safe_find_or_create_by'\",\n      \"app/models/merge_request.rb:1859:in `ensure_metrics'\",\n      \"ee/lib/analytics/merge_request_metrics_refresh.rb:11:in `block in execute'\",\n      \"ee/lib/analytics/merge_request_metrics_refresh.rb:10:in `each'\",\n      \"ee/lib/analytics/merge_request_metrics_refresh.rb:10:in `execute'\",\n      \"ee/app/services/ee/merge_requests/approval_service.rb:57:in `calculate_approvals_metrics'\",\n      \"ee/app/services/ee/merge_requests/approval_service.rb:45:in `block in create_event'\",\n      \"ee/app/services/ee/merge_requests/approval_service.rb:43:in `create_event'\",\n      \"app/services/merge_requests/approval_service.rb:13:in `execute'\",\n      \"ee/app/services/ee/merge_requests/approval_service.rb:14:in `execute'\",\n      \"lib/api/merge_request_approvals.rb:58:in `block (3 levels) in \u003Cclass:MergeRequestApprovals>'\",\n    ]\n  \"rollbacks_count\": 0,\n  \"releases_count\": 1\n}\n\n```\n\n\nThis log message contains not only the number of subtransactions via\n\n`savepoints_count`, but it also contains a handy backtrace that\n\nidentifies the exact source of the problem. The `sql` field also\n\ncontains [Marginalia comments](https://github.com/basecamp/marginalia)\n\nthat we tack onto every SQL query. 
These comments make it possible to\n\nidentify what HTTP request initiated the SQL query.\n\n\n### Taking a hard look at PostgreSQL\n\n\nThe new instrumentation showed that while the application regularly used\n\nsubtransactions, it never exceeded 10 nested `SAVEPOINT` calls.\n\n\nMeanwhile, [Nikolay Samokhvalov](https://gitlab.com/NikolayS), founder\n\nof [Postgres.ai](https://postgres.ai/), performed a battery of tests [trying\nto replicate the\nproblem](https://gitlab.com/postgres-ai/postgresql-consulting/tests-and-benchmarks/-/issues/20).\n\nHe replicated Laurenz's results when a single transaction exceeded 64\n\nsubtransactions, but that wasn't happening here.\n\n\nWhen the database stalls occurred, we observed a number of patterns:\n\n\n1. Only the replicas were affected; the primary remained unaffected.\n\n1. There was a long-running transaction, usually relating to\n\nPostgreSQL's autovacuuming, during the time. The stalls stopped quickly\nafter the transaction ended.\n\n\nWhy would this matter? 
Analyzing the PostgreSQL source code, Senior\n\nSupport Engineer [Catalin Irimie](https://gitlab.com/cat) [posed an\n\nintriguing question that led to a breakthrough in our\nunderstanding](https://gitlab.com/gitlab-org/gitlab/-/issues/338410#note_652056284):\n\n\n> Does this mean that, having subtransactions spanning more than 32 cache\npages, concurrently, would trigger the exclusive SubtransControlLock because\nwe still end up reading them from the disk?\n\n\n### Reproducing the problem with replicas\n\n\nTo answer this, Nikolay immediately modified his test [to involve replicas\nand long-running\ntransactions](https://gitlab.com/postgres-ai/postgresql-consulting/tests-and-benchmarks/-/issues/21#note_653453774).\nWithin a day, he reproduced the problem:\n\n\n![Nikolay\nexperiment](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/nikolay-experiment.png)\n\n\nThe image above shows that transaction rates remain steady around\n\n360,000 transactions per second (TPS). Everything was proceeding fine\n\nuntil the long-running transaction started on the primary. Then suddenly\n\nthe transaction rates plummeted to 50,000 TPS on the replicas. Canceling\n\nthe long transaction immediately caused the transaction rate to return.\n\n\n### What is going on here?\n\n\nIn his blog post, Nikolay called the problem [Subtrans SLRU\noverflow](https://v2.postgres.ai/blog/20210831-postgresql-subtransactions-considered-harmful#problem-4-subtrans-slru-overflow).\n\nIn a busy database, it's possible for the size of the subtransaction log\n\nto grow so large that the working set no longer fits into memory. This\n\nresults in a lot of cache misses, which in turn causes a high amount of\n\ndisk I/O and CPU as PostgreSQL furiously tries to load data from disk to\n\nkeep up with all the lookups.\n\n\nAs mentioned earlier, the subtransaction cache holds a mapping of the\n\nsubXID to the parent XID. 
When PostgreSQL needs to look up the subXID,\n\nit calculates in which memory page this ID would live, and then does a\n\nlinear search to find in the memory page. If the page is not in the\n\ncache, it evicts one page and loads the desired one into memory. The\n\ndiagram below shows the memory layout of the subtransaction SLRU.\n\n\n![Subtrans\nSLRU](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/subtrans-slru.png)\n\n\nBy default, each SLRU page is an 8K buffer holding 4-byte parent\n\nXIDs. This means 8192/4 = 2048 transaction IDs can be stored in each\n\npage.\n\n\nNote that there may be gaps in each page. PostgreSQL will cache XIDs as\n\nneeded, so a single XID can occupy an entire page.\n\n\nThere are 32 (`NUM_SUBTRANS_BUFFERS`) pages, which means up to 65K\n\ntransaction IDs can be stored in memory. Nikolay demonstrated that in a\n\nbusy system, it took about 18 seconds to fill up all 65K entries. Then\n\nperformance dropped off a cliff, making the database replicas unusable.\n\n\nTo our surprise, our experiments also demonstrated that a single\n\n`SAVEPOINT` during a long-transaction [could initiate this problem if\n\nmany writes also occurred\nsimultaneously](https://gitlab.com/gitlab-org/gitlab/-/issues/338865#note_655312474).\nThat\n\nis, it wasn't enough just to reduce the frequency of `SAVEPOINT`; we had\n\nto eliminate them completely.\n\n\n#### Why does a single `SAVEPOINT` cause problems?\n\n\nTo answer this question, we need to understand what happens when a\n\n`SAVEPOINT` occurs in one query while a long-running transaction is\n\nrunning.\n\n\nWe mentioned earlier that PostgreSQL needs to decide whether a given row\n\nis visible to support a feature called [multi-version concurrency\ncontrol](https://www.postgresql.org/docs/current/mvcc.html), or MVCC for\n\nshort. 
It does this by storing hidden columns, `xmin` and `xmax`, in\n\neach tuple.\n\n\n`xmin` holds the XID of when the tuple was created, and `xmax` holds the\n\nXID when it was marked as dead (0 if the row is still present). In\n\naddition, at the beginning of a transaction, PostgreSQL records metadata\n\nin a database snapshot. Among other items, this snapshot records the\n\noldest XID and the newest XID in its own `xmin` and `xmax` values.\n\n\nThis metadata helps [PostgreSQL determine whether a tuple is\nvisible](https://www.interdb.jp/pg/pgsql05.html).\n\nFor example, a committed XID that started before `xmin` is definitely\n\nvisible, while anything after `xmax` is invisible.\n\n\n### What does this have to do with long transactions?\n\n\nLong transactions are bad in general because they can tie up\n\nconnections, but they can cause a subtly different problem on a\n\nreplica. On the replica, a single `SAVEPOINT` during a long transaction\n\ncauses a snapshot to suboverflow. Remember that dragged down performance\n\nin the case where we had more than 64 subtransactions.\n\n\nFundamentally, the problem happens because a replica behaves differently\n\nfrom a primary when creating snapshots and checking for tuple\n\nvisibility. The diagram below illustrates an example with some of the\n\ndata structures used in PostgreSQL:\n\n\n![Diagram of subtransaction handling in\nreplicas](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/pg-replica-subtransaction-diagram.png)\n\n\nOn the top of this diagram, we can see the XIDs increase at the\n\nbeginning of a subtransaction: the `INSERT` after the `BEGIN` gets 1,\n\nand the subsequent `INSERT` in `SAVEPOINT` gets 2. Another client comes\n\nalong and performs a `INSERT` and `SELECT` at XID 3.\n\n\nOn the primary, PostgreSQL stores the transactions in progress in a\n\nshared memory segment. 
The process array (`procarray`) stores XID 1 with\n\nthe first connection, and the database also writes that information to\n\nthe `pg_xact` directory. XID 2 gets stored in the `pg_subtrans`\n\ndirectory, mapped to its parent, XID 1.\n\n\nIf a read happens on the primary, the snapshot generated contains `xmin`\n\nas 1, and `xmax` as 3. `txip` holds a list of transactions in progress,\n\nand `subxip` holds a list of subtransactions in progress.\n\n\nHowever, neither the `procarray` nor the snapshot are shared directly\n\nwith the replica. The replica receives all the data it needs from the\n\nwrite-ahead log (WAL).\n\n\nPlaying the WAL back one entry at time, the replica populates a shared data\n\nstructure called `KnownAssignedIds`. It contains all the transactions in\n\nprogress on the primary. Since this structure can only hold a limited number\nof\n\nIDs, a busy database with a lot of active subtransactions could easily fill\n\nthis buffer. PostgreSQL made a design choice to kick out all subXIDs from\nthis\n\nlist and store them in the `pg_subtrans` directory.\n\n\nWhen a snapshot is generated on the replica, notice how `txip` is\n\nblank. A PostgreSQL replica treats **all** XIDs as though they are\n\nsubtransactions and throws them into the `subxip` bucket. That works\n\nbecause if a XID has a parent XID, then it's a subtransaction. Otherwise,\nit's a normal transaction. [The code comments\n\nexplain the\nrationale](https://gitlab.com/postgres/postgres/blob/9f540f840665936132dd30bd8e58e9a67e648f22/src/backend/storage/ipc/procarray.c#L1665-L1681).\n\n\nHowever, this means the snapshot is missing subXIDs, and that could be\n\nbad for MVCC. 
To deal with that, the [replica also updates\n`lastOverflowedXID`](https://gitlab.com/postgres/postgres/blob/9f540f840665936132dd30bd8e58e9a67e648f22/src/backend/storage/ipc/procarray.c#L3176-L3182):\n\n\n```c\n * When we throw away subXIDs from KnownAssignedXids, we need to keep track of\n * that, similarly to tracking overflow of a PGPROC's subxids array.  We do\n * that by remembering the lastOverflowedXID, ie the last thrown-away subXID.\n * As long as that is within the range of interesting XIDs, we have to assume\n * that subXIDs are missing from snapshots.  (Note that subXID overflow occurs\n * on primary when 65th subXID arrives, whereas on standby it occurs when 64th\n * subXID arrives - that is not an error.)\n```\n\n\nWhat is this \"range of interesting XIDs\"? We can see this in [the code\nbelow](https://gitlab.com/postgres/postgres/blob/4bf0bce161097869be5a56706b31388ba15e0113/src/backend/storage/ipc/procarray.c#L1702-L1703):\n\n\n```c\n\nif (TransactionIdPrecedesOrEquals(xmin, procArray->lastOverflowedXid))\n    suboverflowed = true;\n```\n\n\nIf `lastOverflowedXid` is smaller than our snapshot's `xmin`, it means\n\nthat all subtransactions have completed, so we don't need to check for\n\nsubtransactions. However, in our example:\n\n\n1. `xmin` is 1 because of the transaction.\n\n2. `lastOverflowedXid` is 2 because of the `SAVEPOINT`.\n\n\nThis means `suboverflowed` is set to `true` here, which tells PostgreSQL\n\nthat whenever a XID needs to be checked, check to see if it has a parent\n\nXID. Remember that this causes PostgreSQL to:\n\n\n1. Look up the subXID for the parent XID in the SLRU cache.\n\n1. If this doesn't exist in the cache, fetch the data from `pg_subtrans`.\n\n\nIn a busy system, the requested XIDs could span an ever-growing range of\n\nvalues, which could easily exhaust the 64K entries in the SLRU\n\ncache. 
This range will continue to grow as long as the transaction runs;\n\nthe rate of increase depends on how many updates are happening on the\n\nprimary. As soon as the transaction terminates, the `suboverflowed` state\n\ngets set to `false`.\n\n\nIn other words, we've replicated the same conditions as we saw with 64\n\nsubtransactions, only with a single `SAVEPOINT` and a long transaction.\n\n\n### What can we do about getting rid of Nessie?\n\n\nThere are three options:\n\n\n1. Eliminate `SAVEPOINT` calls completely.\n\n1. Eliminate all long-running transactions.\n\n1. Apply [Andrey Borodin's patches to PostgreSQL and increase the\nsubtransaction\ncache](https://www.postgresql.org/message-id/flat/494C5E7F-E410-48FA-A93E-F7723D859561%40yandex-team.ru#18c79477bf7fc44a3ac3d1ce55e4c169).\n\n\nWe chose the first option because most uses of subtransaction could be\n\nremoved fairly easily. There were a [number of\napproaches](https://gitlab.com/groups/gitlab-org/-/epics/6540) we took:\n\n\n1. Perform updates outside of a subtransaction. Examples:\n[1](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68471),\n[2](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68690)\n\n1. Rewrite a query to use an `INSERT` or an `UPDATE` with an `ON CONFLICT`\nclause to deal with duplicate constraint violations. Examples:\n[1](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68433),\n[2](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/69240),\n[3](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68509)\n\n1. Live with a non-atomic `find_or_create_by`. We used this approach\nsparingly. 
Example:\n[1](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68649)\n\n\nIn addition, we added [an alert whenever the application used a single\n`SAVEPOINT`](https://gitlab.com/gitlab-com/runbooks/-/merge_requests/3881):\n\n\n![subtransaction\nalert](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/subtransactions-alert-example.png)\n\n\nThis had the side benefit of flagging a [minor\nbug](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/70889).\n\n\n#### Why not eliminate all long-running transactions?\n\n\nIn our database, it wasn't practical to eliminate all long-running\n\ntransactions because we think many of them happened via [database\n\nautovacuuming](https://www.postgresql.org/docs/current/runtime-config-autovacuum.html),\n\nbut [we're not able to reproduce this\nyet](https://gitlab.com/postgres-ai/postgresql-consulting/tests-and-benchmarks/-/issues/21#note_669698320).\n\nWe are working on partitioning the tables and sharding the database, but\nthis is a much more time-consuming problem\n\nthan removing all subtransactions.\n\n\n#### What about the PostgreSQL patches?\n\n\nAlthough we tested Andrey's PostgreSQL patches, we did not feel comfortable\n\ndeviating from the official PostgreSQL releases. Plus, maintaining a\n\ncustom patched release over upgrades would add a significant maintenance\n\nburden for our infrastructure team. Our self-managed customers would\n\nalso not benefit unless they used a patched database.\n\n\nAndrey's patches do two main things:\n\n\n1. Allow administrators to change the SLRU size to any value.\n\n1. Add an [associative cache](https://www.youtube.com/watch?v=A0vR-ks3hsQ)\n\nto make it performant to use a large cache value.\n\n\nRemember that the SLRU cache does a linear search for the desired\n\npage. That works fine when there are only 32 pages to search, but if you\n\nincrease the cache size to 100 MB the search becomes much more\n\nexpensive. 
The associative cache makes the lookup fast by indexing pages\n\nwith a bitmask and looking up the entry with offsets from the remaining\n\nbits. This mitigates the problem because a transaction would need to be\n\nseveral orders of magnitude longer to cause a problem.\n\n\nNikolay demonstrated that the `SAVEPOINT` problem disappeared as soon as\n\nwe increased the SLRU size to 100 MB with those patches. With a 100 MB\n\ncache, PostgreSQL can cache 26.2 million IDs (104857600/4), far more\n\nthan the measly 65K.\n\n\nThese [patches are currently awaiting\nreview](https://postgres.ai/blog/20210831-postgresql-subtransactions-considered-harmful#ideas-for-postgresql-development),\n\nbut in our opinion they should be given high priority for PostgreSQL 15.\n\n\n### Conclusion\n\n\nSince removing all `SAVEPOINT` queries, we have not seen Nessie rear her\n\nhead again. If you are running PostgreSQL with read replicas, we\n\nstrongly recommend that you also remove *all* subtransactions until\n\nfurther notice.\n\n\nPostgreSQL is a fantastic database, and its well-commented code makes it\n\npossible to understand its limitations under different configurations.\n\n\nWe would like to thank the GitLab community for bearing with us while we\n\niron out this production issue.\n\n\nWe are also grateful for the support from [Nikolay\n\nSamokhvalov](https://gitlab.com/NikolayS) and [Catalin\n\nIrimie](https://gitlab.com/cat), who contributed to understanding where our\n\nLoch Ness Monster was hiding.\n\n\nCover image by [Khadi\nGaniev](https://www.istockphoto.com/portfolio/Ganiev?mediatype=photography)\non [iStock](https://istock.com), licensed under [standard\nlicense](https://www.istockphoto.com/legal/license-agreement)\n",[1122,718,9],{"slug":1186,"featured":6,"template":699},"why-we-spent-the-last-month-eliminating-postgresql-subtransactions","content:en-us:blog:why-we-spent-the-last-month-eliminating-postgresql-subtransactions.yml","Why We Spent The Last Month Eliminating Postgresql 
Subtransactions","en-us/blog/why-we-spent-the-last-month-eliminating-postgresql-subtransactions.yml","en-us/blog/why-we-spent-the-last-month-eliminating-postgresql-subtransactions",{"_path":1192,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1193,"content":1199,"config":1205,"_id":1207,"_type":13,"title":1208,"_source":15,"_file":1209,"_stem":1210,"_extension":18},"/en-us/blog/why-we-use-rails-to-build-gitlab",{"title":1194,"description":1195,"ogTitle":1194,"ogDescription":1195,"noIndex":6,"ogImage":1196,"ogUrl":1197,"ogSiteName":685,"ogType":686,"canonicalUrls":1197,"schema":1198},"Why we use Ruby on Rails to build GitLab","Here's our CEO on GitLab’s inception using Rails, and how challenges are being handled along the way.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668296/Blog/Hero%20Images/gitlab-ruby.jpg","https://about.gitlab.com/blog/why-we-use-rails-to-build-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we use Ruby on Rails to build GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-10-29\",\n      }",{"title":1194,"description":1195,"authors":1200,"heroImage":1196,"date":1202,"body":1203,"category":300,"tags":1204},[1201],"Aricka Flowers","2018-10-29","\nWhen our Co-founder and Engineering Fellow [Dmitriy Zaporozhets](/company/team/#dzaporozhets) decided to build GitLab, he chose to do it with Ruby on Rails, despite working primarily in PHP at the time. GitHub, a source of inspiration for GitLab, was also based on Rails, making it a logical pick considering his interest in the framework. GitLab CEO [Sid Sijbrandij](/company/team/#sytses) thinks his co-founder made a good choice:\n\n\"It's worked out really well because the Ruby on Rails ecosystem allows you to shape a lot of functionality at a high quality,\" he explained. 
\"If you look at GitLab, it has an enormous amount of functionality. Software development is very complex and to help with that, we need a lot of functionality and Ruby on Rails is a way to do it. Because there's all these best practices that are on your happy path, it’s also a way to keep the code consistent when you ship something like GitLab. You're kind of guided into doing the right thing.\"\n\n### Depending on useful gems\n\nRuby gems play an integral role in the building of GitLab, with it loading more than a thousand non-unique gems, according to Sid. Calling the Ruby on Rails framework \"very opinionated,\" he thinks it's a strong environment in which to build a complex app like GitLab.\n\n\"There's a great ecosystem around it with gems that can make assumptions about how you're doing things and in that regard, I think the Ruby on Rails ecosystem is still without par,\" he says. \"If you look at our Gemfile, it gives you an indication of how big the tower is of dependencies that we can build on. Ruby on Rails has amazing shoulders to stand on and it would have been much slower to develop GitLab in any other framework.\"\n\n### Overcoming challenges\n\nAll of this is not to say there haven’t been challenges in building GitLab with Ruby on Rails. Performance has been an issue that our developers have made strides to improve in a number of ways, including rewriting code in Go and [using the Vue framework](/blog/why-we-chose-vue/). The latter is being used to rewrite frequently accessed pages, like issues and merge requests, so they load faster, improving user experience.\n\nGo is being used to address other issues affecting load times and reduce memory usage.\n\n\"Ruby was optimized for the developer, not for running it in production,\" says Sid. \"For the things that get hit a lot and have to be very performant or that, for example, have to wait very long on a system IO, we rewrite those in Go … We are still trying to make GitLab use less memory. 
So, we'll need to enable multithreading. When we developed GitLab that was not common in the Ruby on Rails ecosystem. Now it's more common, but because we now have so much code and so many dependencies, it's going to be a longer path for us to get there. That should help; it won't make it blazingly fast, but at least it will use less memory.\"\n\nAdding Go to GitLab’s toolbox led to the creation of a separate service called [Gitaly](/blog/the-road-to-gitaly-1-0/), which handles all Git requests.\n\n### Building on GitLab’s mission\n\nThe organized, structured style of Ruby on Rails’ framework falls in line with our core mission. Because Rails is streamlined, anyone can jump into GitLab and participate, which made it especially attractive to Sid from the start.\n\n\"[Our mission is that everyone can contribute](/company/mission/#mission),\" he explains. \"Because Ruby on Rails is really opinionated about which pieces go where, it's much easier for new developers to get into the codebase, because you know where people have put stuff. For example, in every kitchen you enter, you never know where the knives and plates are located. But with Ruby on Rails, you enter the kitchen and it's always in the same place, and we want to stick to that.\n\n>In every kitchen you enter, you never know where the knives and plates are located. But with Ruby on Rails, you enter the kitchen and it's always in the same place, and we want to stick to that.\n\n\"I was really encouraged when I opened the project and saw it for the first time a year after Dmitriy started it. I opened it up and it's idiomatic Rails. He followed all the principles. He didn't try to experiment with some kind of fad that he was interested in. He made it into a production application. Dmitriy carefully vetted all the contributions to make sure they stick to those conventions, and that's still the case. I think we have a very nice codebase that allows other people to build on top of it. 
One of our sub-values is [boring solutions](https://handbook.gitlab.com/handbook/values/#efficiency): don't do anything fancy. This is so that others can build on top it. I think we've done that really well … and we're really thankful that Ruby has been such a stable, ecosystem for us to build on.\"\n\n[Cover image](https://unsplash.com/photos/0y6Y56Pw6DA) by [Elvir K](https://unsplash.com/@elvir) on Unsplash\n{: .note}\n",[1081,268,9,804,1122,909,825],{"slug":1206,"featured":6,"template":699},"why-we-use-rails-to-build-gitlab","content:en-us:blog:why-we-use-rails-to-build-gitlab.yml","Why We Use Rails To Build Gitlab","en-us/blog/why-we-use-rails-to-build-gitlab.yml","en-us/blog/why-we-use-rails-to-build-gitlab",{"_path":1212,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1213,"content":1219,"config":1224,"_id":1226,"_type":13,"title":1227,"_source":15,"_file":1228,"_stem":1229,"_extension":18},"/en-us/blog/wrapping-up-commit",{"title":1214,"description":1215,"ogTitle":1214,"ogDescription":1215,"noIndex":6,"ogImage":1216,"ogUrl":1217,"ogSiteName":685,"ogType":686,"canonicalUrls":1217,"schema":1218},"Wrapping up GitLab Commit","From bagels to bowling with a healthy dose of DevSecOps and CI/CD in between, it was an epic day of learning and sharing at GitLab Commit Brooklyn.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680823/Blog/Hero%20Images/commit-brooklyn-graffiti-cover.jpg","https://about.gitlab.com/blog/wrapping-up-commit","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Wrapping up GitLab Commit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-09-18\",\n      }",{"title":1214,"description":1215,"authors":1220,"heroImage":1216,"date":1221,"body":1222,"category":300,"tags":1223},[945],"2019-09-18","\n\n***Relive GitLab Commit Brooklyn through the power of lights, cameras, and a pinch 
of Tanuki magic. Here's the [full YouTube playlist for the event](https://www.youtube.com/playlist?list=PLFGfElNsQthaaqEAb6ceZvYnZgzSM50Kg)!***\n\nIf there's anything you need to understand about GitLab's first ever user conference, it's this: I started the day with a New York bagel, learned how to create a CI/CD pipeline in just 20 minutes, found out [NASA will take GitLab into space](/blog/open-source-nasa-gl/), and it ended in a bowling alley... yes, it was _that_ kind of day.\n\nWe did a neighborhood takeover of a few blocks in the Williamsburg area of Brooklyn and before I even arrived at the venue, I knew something interesting was happening. There was wall grafitti and street graffiti.\n\n![street graffiti](https://about.gitlab.com/images/blogimages/commitbrooklynstreet.jpg){: .shadow.small.center}\nGitLab has arrived in Brooklyn!\n{: .note.text-center}\n\nOver 400 attendees gathered in brick-and-light-filled meeting spaces for conversation, demonstrations, laughter, and even a screaming chicken (the result of the CI/CD demo). It was an epic day of sharing, learning and exploring that could have felt overwhelming. Instead, the quirky informal spaces seemed to relax everyone and make it easier to actually listen and learn.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-partner=\"tweetdeck\">\u003Cp lang=\"en\" dir=\"ltr\">THE live coding keynote is here! 
\u003Ca href=\"https://twitter.com/eddiezane?ref_src=twsrc%5Etfw\">@eddiezane\u003C/a> of \u003Ca href=\"https://twitter.com/digitalocean?ref_src=twsrc%5Etfw\">@digitalocean\u003C/a> introduces his “startup” Screaming Chicken at \u003Ca href=\"https://twitter.com/hashtag/GitLabCommit?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabCommit\u003C/a> and shows how he runs it on \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@GitLab\u003C/a> AutoDevops, \u003Ca href=\"https://twitter.com/hashtag/Kubernetes?src=hash&amp;ref_src=twsrc%5Etfw\">#Kubernetes\u003C/a> and DO. The audience is riveted! \u003Ca href=\"https://t.co/ibao6ngeNX\">pic.twitter.com/ibao6ngeNX\u003C/a>\u003C/p>&mdash; Priyanka Sharma @ #GitLabCommit Brooklyn! (@pritianka) \u003Ca href=\"https://twitter.com/pritianka/status/1173972101713276928?ref_src=twsrc%5Etfw\">September 17, 2019\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nThis was not anyone's typical idea of a user conference: no large, impersonal hotel, no pre-fab food, and no stilted conversations with total strangers. No one spent the day in frigid air conditioning. Instead everyone moved seamlessly from space to space, inside and outside, and it really was refreshing.\n\nLunch was refreshing too. 
It's not every day a gorilla brings you grilled cheese and tater tots under sunny skies.\n\n![Gorilla Grilled Cheese](https://about.gitlab.com/images/blogimages/commitbrooklyngorilla.jpg){: .shadow.small.center}\nThis was some grilled cheese!\n{: .note.text-center}\n\nAfter lunch, some people met up with our CEO [Sid Sijbrandij](/company/team/#sytses) while others attended individual tracks.\n\n![Office hours with Sid](https://about.gitlab.com/images/blogimages/commitbrooklynsid.jpg){: .shadow.small.center}\nMeet the CEO!\n{: .note.text-center}\n\nAn open coffee and tea bar (we took over the local coffee shop and my iced chai latte was delicious) fueled lots of conversations about the challenges we all face around DevOps.\n\n![iced chai](https://about.gitlab.com/images/blogimages/commitbrooklynchai.jpg){: .shadow.small.center}\nCheers!\n{: .note.text-center}\n\nAnd then it was time to, well, bowl.\n\n![Bowling](https://about.gitlab.com/images/blogimages/commitbrooklynbowling.jpg){: .shadow.small.center}\nGitLab at Brooklyn Bowl\n{: .note.text-center}\n\nIt might be bragging, but we really do throw a great party (and user conference, for that matter).\n\nIf you'd like to see for yourself, you'll have another chance to network with others on the same DevOps journey. Get your tickets to [Commit London on October 9](/events/commit/#). 
You can also read about news from Commit: [$268 million in Series E funding, new partners, and more](/blog/live-from-commit-news/), and check out the highlight reel below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/hi2D0Se_VnA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003C%= partial \"includes/blog/blog-merch-banner\" %>\n",[108,718,278,1037,9],{"slug":1225,"featured":6,"template":699},"wrapping-up-commit","content:en-us:blog:wrapping-up-commit.yml","Wrapping Up Commit","en-us/blog/wrapping-up-commit.yml","en-us/blog/wrapping-up-commit",{"_path":1231,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1232,"content":1238,"config":1244,"_id":1246,"_type":13,"title":1247,"_source":15,"_file":1248,"_stem":1249,"_extension":18},"/en-us/blog/comparing-static-site-generators",{"title":1233,"description":1234,"ogTitle":1233,"ogDescription":1234,"noIndex":6,"ogImage":1235,"ogUrl":1236,"ogSiteName":685,"ogType":686,"canonicalUrls":1236,"schema":1237},"How to choose the right static site generator","Here's an in-depth look at 6 static site generators that deploy to GitLab Pages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682290/Blog/Hero%20Images/kelly-sikkema-gchfxsdcmje-unsplash-resized.jpg","https://about.gitlab.com/blog/comparing-static-site-generators","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to choose the right static site generator\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2022-04-18\",\n      }",{"title":1233,"description":1234,"authors":1239,"heroImage":1235,"date":1241,"body":1242,"category":802,"tags":1243},[1240],"Fatima Sarah Khalid","2022-04-18","\n\nMost websites today fall into two categories - dynamic sites and static sites:\n\n## 
Dynamic sites\n\nDynamic sites are interactive, and the user experience can be tailored to the visitor. These are the ones that might remember who you are across visits or deliver content that's most applicable to the region you're visiting from. They rely on a content management system (CMS) or database for rendering and can continue to grow in complexity as the organization's needs grow. \n\n## Static sites\n\nStatic sites, however, generally display the same content to all users. They use server-side rendering to serve HTML, CSS, and Javascript files. While CMS backends have made dynamic sites easier to launch and maintain, static sites continue to grow in popularity.\n\n## What is a static site generator?\n\nA static site generator (SSG) is a software tool that generates a static website by processing plain text files that contain the websites content and markup. The resulting website consists of a set of HTML, CSS, and JavaScript files, and other assets, that can be served by a web server to visitors.\n\nTo use a static site generator, you typically write your website's' content in a markup language like Markdown, and use a templating language to define the website's layout and structure. You then run the static site generator to generate the final set of HTML, CSS, and JavaScript files, which can be deployed to a web server or content delivery network (CDN) for visitors to access.\n\n## Benefits of using a static site generator\n\nStatic sites' advantages include speed, security, and SEO. They're also easy to maintain and highly scalable. 
Because the static site generators store an already-compiled page on a CDN, they load a lot faster.\n\nAs static site generators are comprised solely of static files, no database is required, resulting in many additional benefits:\n\n* There is no need to spend valuable time querying the database or running any resource-intensive server-side scripts.\n\n* There are no extraneous libraries, no databases, or other features that a dynamic generator is built with. All you have are static files. Therefore, it’s very simple to work with and migrate as needed.\n\n* There’s no database for would-be hackers to attack. \n\n* Since there is no need for scripts to run on a file-based generator, scaling is very easy without overwhelming your server.\n\nAll static site generators can be exciting and fun, but some require time and effort on configurations, detailed templating, or management tweaks. My team and I joke that I am one of the top blog-less SSG experts, so in this blog post, I’ll walk you through a toolkit for evaluating your project and then share some SSGs that deploy to GitLab Pages. 
\n\nHere are the SSGs I'll review in this post:\n\n- [**Hugo**](https://gohugo.io/) is written in Go with support for multi-language sites and complex content strategy.\n- [**Zola**](https://www.getzola.org) is written in Rust with a single binary, no dependencies, and flexible features like Sass compilation.\n- [**Jekyll**](https://jekyllrb.com/) is written in Ruby, built for blogging, and has a large collection of plugins and themes.\n- [**Hexo**](https://hexo.io/) is Node.js based with support for multiple templating engines, integrations with NPM packages, and one command deployment.\n- [**GatsbyJS**](https://www.gatsbyjs.com/) is React-based, works with any CMS, API, or database, and can be used for building headless experiences.\n- [**Astro**](https://gitlab.com/pages/astro) is Javascript-based, supports multiple frameworks, and is known for on-demand rendering via partial hydration.\n\n##  An Evaluation Toolkit\n\nWith so many static site generators available, selecting one for your project can be overwhelming. When evaluating which SSG is right for you, here are a few things to consider about your project, use case, and the type of work you're looking to put into the site. \n\n**Identify the use case**\n\nIt’s important to understand your site's needs, purpose, and content. Are you building a personal blog, a landing page for a product, or documentation for a tech project? Consider whether you need a streamlined editor experience, content, and interactions with your user. The better you can identify the experience you'd like your visitors to have, the easier it will be to pick the feature set that can best support it. \n\n**Specify languages and frameworks**\n\nThere are so many static site generators out there that you can find one in nearly every language and framework. Consider whether you want to learn a new language or use something you're familiar with. 
Depending on how much time you’d like to invest in setting up, you should also review the installation details and see if you’re familiar with the templating language, dependencies, and theming layer. \n\n**Review the ecosystem**\n\nMany static site generators will have starter repositories or sample sites where you can play around with functionality and components before diving into your project. When reviewing the ecosystem, think about the limitations of the templating engine and whether you’ll need dynamic or Javascript components, and how you’ll include them. Some generators will have out-of-the-box or community-contributed plugins and extensions. \n\n**Check out the community**\n\nThere are often Discord or forum communities where you can get support, share ideas, review cases studies, and see what other people are building. Some of the most popular generators might even have conferences or workshops for getting started on more complex topics. \n\n**Identify the specialty** \n\nFrom microblogging to academic portfolios and small business sites, static site generators cover various use cases for different people. Each SSG has its own thing, whether it’s the framework it uses, a unique feature in its templating language, or the size of the installation binary.\n\n## The Single Binary Approach \n\nSome static site generators install a single binary and don't require complex dependency management. The single binary approach gets things set up quickly and easily. It is also easier for non-technical or academic users since you can pass the executable around for installation and use a markup language like Markdown to write content. \n\n[**Hugo**](https://gohugo.io/) is written in Go, a statically compiled language, with support for multiple platforms. The Hugo binary can be downloaded and run from anywhere and is simple to install, with no runtime dependencies or installation process. Upgrades involve downloading a new binary, and you're all set. 
Hugo supports unlimited content types, taxonomies, dynamic content driven from an API, multi-lingual sites, and markdown. It also ships with premade templates making it easy to get started with menus and site maps. \n\nOne of the advantages of using Hugo is that it doesn't depend on client-side JS. It also has a thriving community with many prebuilt themes and starter repositories. There is an [existing sample site in Hugo that deploys to GitLab pages](https://gitlab.com/pages/hugo). If you're migrating to Hugo from another SSG, you can use the [hugo import](https://gohugo.io/commands/hugo_import/) command or one of the [community-developed migration tools](https://gohugo.io/tools/migrations/). There's a [Hugo site example](https://gitlab.com/pages/hugo) on GitLab pages to help you get started.\n\n[**Zola**](https://www.getzola.org) is a strongly opinionated SSG written in Rust that uses the Tera template engine. It's available as [a prebuilt binary](https://github.com/getzola/zola/releases), is super-fast to set up, and comes with some essential features like syntax highlighting, taxonomies, table of contents, Sass compilation, and hot reloading. The Tera templating engine supports build-in short-codes to inject more complex HTML or for repetitive data-driven sections. Configuration for Zola sites is managed in TOML.\n\nOne of the limitations of Zola might be the lack of a built-in scripting language. Unlike other SSGs, there isn't an ecosystem of plugins you can add to your site. Many in the community appreciate this lack of modularity because Zola’s specialty is content-driven sites. [One of the most popular posts on their Discourse forum is a proposal for plugins](https://zola.discourse.group/t/proposal-plugin/975) which discusses ways to include dynamic loading for plugins without affecting the single binary distribution. \n\nZola is commonly used for content-driven websites. 
One of its notable features is how content is structured using a tree with sections and pages.  There is no example site on GitLab pages, but the Zola documentation includes a [guide on how to deploy to GitLab pages](https://www.getzola.org/documentation/deployment/gitlab-pages/). \n\n## The Standard Approach\n\nWhen it comes to generators and frameworks, you might hear, \"Boring is better.\" Sometimes the preferred SSG is feature complete, well documented, and has a community of examples and plugins to support it - even if it's not actively growing anymore. \n\n[**Jekyll**](https://jekyllrb.com/) is a static site generator written in Ruby and released in 2008. It paved the way for static sites by replacing the need for a database and inspiring developers to start creating blogs and documentation pages quickly and easily. It uses the Liquid templating language, has a vast plugin ecosystem, and is known to be beginner-friendly since it’s just HTML (or Markdown, if you prefer). While it doesn’t provide many features out of the box, Jekyll supports Ruby plugins for any functionality you might need. There are over [200 plugins](https://github.com/topics/jekyll-plugin), themes, and resources available to use.\n\nOne of the challenges when working with Jekyll can be the requirement of having a whole Ruby development environment to build your site. This can be tricky for developers unfamiliar with Ruby or when making updates. Another thing to consider is the build pipeline - it supports Sass compilation out of the box, but the community recommends using webpack to build assets instead. If you're migrating to Jekyll from another framework or CMS, there are [importers](https://import.jekyllrb.com/docs/home/) that can help automate part of the process. There is a [Jekyll site example that deploys to GitLab pages](https://gitlab.com/pages/jekyll).\n\n[**Hexo**](https://hexo.io/) is a NodeJS static site generator that offers itself as a blogging framework. 
It has built-in support for Markdown, front matter, and tag plugins. It specializes in creating markup-driven blogs. Hexo provides the Nunjucks template engine by default, but you can easily install additional plugins to support alternative templating engines. Like Jekyll, Hexo also [supports migrations](https://hexo.io/docs/migration#content-inner) from several popular frameworks, including WordPress. \n\nA notable feature of Hexo is tag plugins. Tag plugins are snippets of code you can add to your Markdown files without having to write complex or messy HTML to render specific content. Hexo supports several tag plugins, including block quotes, Twitter and Youtube embeds, and code blocks. There’s an [example site for Hexo that deploys to GitLab pages](https://gitlab.com/pages/hexo) and also a [guide in the Hexo documentation](https://hexo.io/docs/gitlab-pages). \n\n## SSGs and beyond\n\nFor those who love flexibility and modularity, there are some SSGs that allow you do everything from full content moderation support and dynamic API-driven content to state management and partial rendering. \n\n[**GatsbyJS**](https://www.gatsbyjs.com/) is an open-source React-based static site generator optimized for speed and has an extensive plugin library. GatsbyJS supports routing, and handling images, accessibility, and hot reloading out of the box. To improve performance, it loads only the critical elements of the page and prefetches assets for other pages to load them as quickly as possible. It also uses webpack to bundle all of your assets. \n\nGatsbyJS believes in a “content mesh” where third-party platforms provide specialized functionality to the base architecture. It allows you to seamlessly pull data from multiple sources, making it popular for Headless approaches with a CMS backend like Drupal, WordPress, or Contentful. You use GraphQL to query the APIs and manage data throughout your site. 
The GatsbyJS community has contributed over 2000 plugins, including starter repositories and templates that you can use to get started. There’s an [example GatsbyJS site that deploys to GitLab pages](https://gitlab.com/pages/gatsby). \n\n[**Astro**](https://gitlab.com/pages/astro) is a Bring Your Own Framework (BYOF) static site generator with no package dependencies. You can build your site with any JavaScript framework or web components, and Astro will render it into static HTML and CSS. This flexibility has made it popular since it’s future-proof for migrations. Astro ships with automatic sitemaps, RSS feeds, and pagination. It uses Snowpack to compile Javascript, which supports hot module replacement, ES6 modules, and dynamic imports without extra configuration. The project is still a [Beta release with the 1.0 coming in June 2022](https://twitter.com/astrodotbuild/status/1512505549354639363?s=20&t=zXDUGuYmbiOp08FTETXw5A). \n\nA notable feature of Astro is partial hydration. If you decide that parts of your site need interactivity, you can “hydrate” just those components when they become visible on the page. This way, your pages will load super fast by default and have [“islands of interactivity”](https://docs.astro.build/en/core-concepts/partial-hydration/#island-architecture). There are several themes, plugins, components, and showcase projects available. Astro has [an online playground](https://astro.new/) where you can try out features and integrations in your browser. There’s also [an Astro example site on GitLab pages](https://gitlab.com/pages/astro). \n\n## Creating your own SSG\n\nSometimes, the best part of building a static site is creating a custom generator based on a specific programming language, architecture, and feature set. You might find that the process of creating a static site generator is more exciting than actually writing blogs for your site. 
Consider several preferences, from document structure to a templating language, theming support, custom plugins, and the build pipeline. You’ll have the opportunity to customize the features to your liking. And there are many [static site generators that deploy to GitLab pages](https://gitlab.com/pages) to provide inspiration! \n\n\nCover image by [Kelly Sikkema](https://unsplash.com/photos/gcHFXsdcmJE) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[9,268,948],{"slug":1245,"featured":6,"template":699},"comparing-static-site-generators","content:en-us:blog:comparing-static-site-generators.yml","Comparing Static Site Generators","en-us/blog/comparing-static-site-generators.yml","en-us/blog/comparing-static-site-generators",3,[678,704,726,746,769,789,811,833,856],1758326238142]